Boosting has been one of the hotter machine-learning methods of the past few years, and there are plenty of introductory articles about it online. With some free time on my hands, I took the GBDT model from the sklearn machine-learning package and ran a quick test on the A-share market; the full process is below. First, load the packages:
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
Below is a factor preprocessing function. preprocess() is the one provided in the bigquant "Academy"; it fills missing values with np.nanmean(). Whether that is reasonable is not something this article will debate, so I use it as-is for now. The reason I raise the question at all is that during the market crash thousands of stocks were suspended, so a lot of missing values get filled this way, which does not seem like a great property for a machine-learning sample.
def preprocess(df, factors):
    for factor in factors:
        # 1. Missing values: fill with the column mean
        df[factor].fillna(np.nanmean(df[factor]), inplace=True)
        # 2. Extreme values: winsorize at the 5th/95th percentiles
        p_95 = np.percentile(df[factor], 95)
        p_5 = np.percentile(df[factor], 5)
        df.loc[df[factor] > p_95, factor] = p_95
        df.loc[df[factor] < p_5, factor] = p_5
        # 3. Standardize (z-score)
        df[factor] = (df[factor] - df[factor].mean()) / df[factor].std()
    return df
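On the suspension concern above: instead of mean-filling every gap, one alternative is to drop rows that have no label or too many missing factor values before preprocessing. A minimal sketch of that idea (the helper name and the 30% threshold are arbitrary choices of mine, not part of the bigquant recipe):

def drop_sparse_rows(df, factors, label='return_10', max_missing=0.3):
    # Drop rows whose forward return is missing (e.g. suspended through the holding period)
    df = df.dropna(subset=[label])
    # Drop rows where too large a share of the factor columns is missing
    missing_frac = df[factors].isnull().mean(axis=1)
    return df[missing_frac <= max_missing]

Calling something like this right before preprocess() would leave fillna() with only the occasional gap to patch.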
Load the factor data. Nine factors are used here: fundamental factors pb_lf_0, ps_ttm_0, pe_ttm_0; technical factors ta_atr_14_0, ta_atr_28_0, ta_rsi_14_0, ta_rsi_28_0; a volume/turnover factor avg_amount_10; and a money-flow factor avg_mf_net_amount_10.
instruments = D.instruments()
# Start date
start_date = '2015-01-01'
# End date
end_date = '2017-09-29'
rebalance_period = 10
factors = ['return_10','avg_amount_10','pb_lf_0','ps_ttm_0','ta_atr_14_0','ta_atr_28_0','ta_rsi_14_0','ta_rsi_28_0','pe_ttm_0','avg_mf_net_amount_10']
features_data = D.features(instruments=instruments, start_date=start_date, end_date=end_date, fields=factors)
mydata = features_data.set_index(['date','instrument'])[factors].unstack()
for col in mydata['return_10'].columns:
    mydata['return_10'][col] = mydata['return_10'][col].shift(-rebalance_period)
_mydata = mydata.stack()
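A quick note on the shift above: moving return_10 back by rebalance_period rows turns it into a forward-looking label, so the features on day t are paired with the 10-day return realized after t (the last few rows become NaN and are unusable). A toy illustration with made-up numbers:

# Illustration only: shift(-1) pairs today's row with tomorrow's value
s = pd.Series([0.01, 0.02, 0.03, 0.04],
              index=pd.date_range('2015-01-05', periods=4))
print(s.shift(-1))  # the last entry becomes NaN, just like the tail of return_10 above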
As a first test, use a single day of data as the training set and the very next day as the test set, and see how it does.
# Training set: a single day
trainDate = '2015-01-05'
train_mydata = _mydata.loc[trainDate]
std_train_mydata = preprocess(train_mydata, factors)
train_x = std_train_mydata.drop(['return_10'], axis=1)
train_y = std_train_mydata['return_10']
# Test set: the following day
testDate = '2015-01-06'
test_mydata = _mydata.loc[testDate]
std_test_mydata = preprocess(test_mydata, factors)
test_x = std_test_mydata.drop(['return_10'], axis=1)
test_y = std_test_mydata['return_10']
Build a GBDT regression model with essentially default parameters, except that the loss function is set to huber (the default is ls).
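For intuition on that choice: the huber loss is quadratic for small residuals and linear for large ones, so a handful of extreme returns drags the fit around less than squared (ls) loss does; in GradientBoostingRegressor, alpha is the residual quantile at which the switch happens. A hand-rolled sketch of the per-sample loss, for reference only (delta is just a placeholder threshold):

def huber_loss(residuals, delta=1.0):
    # Quadratic inside +/-delta, linear outside: large errors are penalized
    # less harshly than with squared loss
    r = np.abs(residuals)
    return np.where(r <= delta, 0.5 * r ** 2, delta * (r - 0.5 * delta))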
###############################################################################
# Fit regression model
# params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
#           'learning_rate': 0.01, 'loss': 'ls'}
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'huber', 'alpha': 0.9}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(train_x.values,train_y.values)
mse = mean_squared_error(test_y, clf.predict(test_x))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(test_x)):
    # clf.loss_ is callable in the scikit-learn version used here (pre-1.0 API)
    test_score[i] = clf.loss_(test_y, y_pred)
plt.figure(figsize=(16, 6))
plt.subplot(1, 3, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 3, 3)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, test_x.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
The test shows that after 500 iterations the deviance is still converging, though only by a small margin, and the overall result looks quite good. The Variable Importance plot shows ta_rsi_28_0 as the single most important factor in this prediction. The chart below plots predicted against actual values; the predictions are broadly sensible, and ranking stocks by predicted value looks usable as a selection criterion.
test_df = pd.DataFrame(index=test_y.index)
test_df['test_y'] = test_y
test_df['pred_y'] = clf.predict(test_x)
fig = plt.figure(figsize=(14,6))
fig.set_tight_layout(True)
ax1 = plt.subplot2grid((8,1), (0,0), rowspan=8)
ax2 = ax1.twinx()
ax1.grid()
test_df.plot(ax=ax1)
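Following up on using the predictions as a ranking for stock selection, a minimal sketch that keeps the highest-ranked names (the top-30 cutoff is arbitrary and not something tested in this post):

# Rank the cross-section by predicted forward return and keep the top names
top_n = 30  # arbitrary cutoff, for illustration only
selected = test_df.sort_values('pred_y', ascending=False).head(top_n)
print(selected.index.tolist())  # instruments with the highest predicted 10-day return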
But training on one day to predict the next day is arguably too casual. So next I stretch the training set to half a year and still predict the immediately following day, to see how it holds up.
# Half-year training set, one-day test set
trainbeginDate = '2015-01-05'
trainendDate = '2015-06-30'
train_mydata = mydata[(mydata.index>=trainbeginDate)&(mydata.index<=trainendDate)]
_train_mydata = train_mydata.stack()
std_train_mydata = preprocess(_train_mydata,factors)
train_x = std_train_mydata.drop(['return_10'],axis=1)
train_y = std_train_mydata['return_10']
testDate = '2015-07-01'
test_mydata = _mydata.loc[testDate]
std_test_mydata = preprocess(test_mydata,factors)
test_x = std_test_mydata.drop(['return_10'],axis=1)
test_y = std_test_mydata['return_10']
###############################################################################
# Fit regression model
# params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
#           'learning_rate': 0.01, 'loss': 'ls'}
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'huber', 'alpha': 0.9}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(train_x.values,train_y.values)
mse = mean_squared_error(test_y, clf.predict(test_x))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(test_x)):
    test_score[i] = clf.loss_(test_y, y_pred)
plt.figure(figsize=(16, 6))
plt.subplot(1, 3, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 3, 3)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, test_x.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
The result: it falls apart. The test deviance simply takes off instead of converging, which is not usable at all. Maybe I built the training set wrong, O(∩_∩)O haha~ Next, I keep the same training set but predict a date far in the future: train on the first half of 2015 and predict 2017-09-05.
testDate = '2017-09-05'
test_mydata = _mydata.loc[testDate]
std_test_mydata = preprocess(test_mydata,factors)
test_x = std_test_mydata.drop(['return_10'],axis=1)
test_y = std_test_mydata['return_10']
mse = mean_squared_error(test_y, clf.predict(test_x))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(test_x)):
    test_score[i] = clf.loss_(test_y, y_pred)
plt.figure(figsize=(16, 6))
plt.subplot(1, 3, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 3, 3)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, test_x.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
The result still blows up, yet oddly it is better than using the same first-half-of-2015 training set to predict 2015-07-01. How should that be understood? O(∩_∩)O haha~ Next, going back the other way and again using a one-day training set to predict one day: train on 2017-09-05 and predict 2017-09-06.
# Training set: a single day
trainDate = '2017-09-05'
train_mydata = _mydata.loc[trainDate]
std_train_mydata = preprocess(train_mydata,factors)
train_x = std_train_mydata.drop(['return_10'],axis=1)
train_y = std_train_mydata['return_10']
testDate = '2017-09-06'
test_mydata = _mydata.loc[testDate]
std_test_mydata = preprocess(test_mydata,factors)
test_x = std_test_mydata.drop(['return_10'],axis=1)
test_y = std_test_mydata['return_10']
###############################################################################
# Fit regression model
# params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
#           'learning_rate': 0.01, 'loss': 'ls'}
params = {'n_estimators': 1000, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'huber', 'alpha': 0.9}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(train_x.values,train_y.values)
mse = mean_squared_error(test_y, clf.predict(test_x))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(test_x)):
    test_score[i] = clf.loss_(test_y, y_pred)
plt.figure(figsize=(16, 6))
plt.subplot(1, 3, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 3, 3)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, test_x.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
The result is not as good as training on 2015-01-05 to predict 2015-01-06, but at least after 1000 iterations the deviance is still converging. The most influential factor this time is the turnover factor (avg_amount_10), although the importances are spread fairly evenly across all factors, which matches the current market in one word: indecisive. With no dominant factor, prediction is hard, and that is the prevailing theme right now.
To sum up, a few takeaways from this test:
1. A longer training set is not necessarily better; a single day actually worked best. Had I known, plain linear regression might have done the job, O(∩_∩)O haha~
2. The same model predicts very differently in different periods. When the market is easy, machine learning predicts well, but traditional quant methods also do fine; when the market is hard, traditional quant struggles, and with the same factors machine learning falls apart just the same.
3. Whether my test setup is even correct, I leave to the bigquant gurus to point out. Comments and criticism welcome.
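If anyone wants to probe the second point more systematically, below is a rough, untested sketch of repeating the one-day-train / next-day-test experiment across every date in the sample (it reuses _mydata, preprocess() and params from above; fitting one model per day is slow, so treat it as an illustration rather than a proper backtest):

# Rolling one-day-ahead check: train on day t, test on day t+1, record the MSE
dates = _mydata.index.get_level_values('date').unique().sort_values()
daily_mse = {}
for train_d, test_d in zip(dates[:-1], dates[1:]):
    tr = _mydata.loc[train_d].dropna(subset=['return_10']).copy()
    te = _mydata.loc[test_d].dropna(subset=['return_10']).copy()
    if tr.empty or te.empty:  # the tail dates have no forward return after the shift
        continue
    tr = preprocess(tr, factors)
    te = preprocess(te, factors)
    model = ensemble.GradientBoostingRegressor(**params)
    model.fit(tr.drop(['return_10'], axis=1).values, tr['return_10'].values)
    pred = model.predict(te.drop(['return_10'], axis=1).values)
    daily_mse[test_d] = mean_squared_error(te['return_10'], pred)
mse_series = pd.Series(daily_mse)
print(mse_series.describe())  # how stable is the one-day-ahead MSE across market regimes?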