# TODO: 1) 添加对训练集的预测,用于判断是否是过拟合还是欠拟合,看效果 2) 添加中间展示和调优的部分,用于改进模型和判断程序是否正常运行 3) 进行一个完整的训练
# NOTE(review): leftover notebook inspection cell — `m13` is only assigned
# much later in this file, so this line raises NameError if the file is run
# top-to-bottom; it only works as a separate cell executed after the graph
# below has been built. Confirm whether it should be removed or moved.
m13.data.read_df().head()
# 本代码由可视化策略环境自动生成 2019年2月13日 20:42
# 本代码单元只能在可视化模式下编辑。您也可以拷贝代码,粘贴到新建的代码单元或者策略,然后修改。
# Python 代码入口函数,input_1/2/3 对应三个输入端,data_1/2/3 对应三个输出端
def m21_run_bigquant_run(input_1, input_2, input_3):
    """Publish the list of feature-column names used by the model.

    The list is pickled into a DataSource on ``data_1`` so the downstream
    cached modules (training m4, prediction m5) select exactly the same
    columns in the same order.  The three inputs are unused.
    """
    # Cross-sectional rank features plus one raw money-flow column.
    rank_block = [
        'rank_avg_amount_5', 'rank_avg_turn_5',
        'rank_volatility_5_0', 'rank_swing_volatility_5_0',
        'rank_avg_mf_net_amount_5', 'rank_beta_industry_5_0',
        'rank_return_5', 'rank_return_2', 'mf_net_amount_1',
    ]
    # Return / liquidity ratios and rank spreads.
    momentum_block = [
        'return_5-1', 'return_10-1', 'return_20-1',
        'avg_amount_0/avg_amount_5-1', 'avg_amount_5/avg_amount_20-1',
        'rank_avg_amount_0-rank_avg_amount_5',
        'rank_avg_amount_5-rank_avg_amount_10',
        'rank_return_0-rank_return_5', 'rank_return_5-rank_return_10',
    ]
    # Beta, volatility and technical-indicator ratios.
    technical_block = [
        'beta_csi300_30_0/10', 'beta_csi300_60_0/10',
        'swing_volatility_5_0/swing_volatility_30_0-1',
        'swing_volatility_30_0/swing_volatility_60_0-1',
        'ta_atr_14_0/ta_atr_28_0-1', 'ta_sma_5_0/ta_sma_20_0-1',
        'ta_sma_10_0/ta_sma_20_0-1', 'ta_sma_20_0/ta_sma_30_0-1',
        'ta_sma_30_0/ta_sma_60_0-1', 'ta_rsi_14_0/100', 'ta_rsi_28_0/100',
        'ta_cci_14_0/500', 'ta_cci_28_0/500',
        'beta_industry_30_0/10', 'beta_industry_60_0/10',
    ]
    # Moving-average ratios of traded amount and turnover.
    sma_block = [
        'ta_sma(amount_0, 10)/ta_sma(amount_0, 20)-1',
        'ta_sma(amount_0, 20)/ta_sma(amount_0, 30)-1',
        'ta_sma(amount_0, 30)/ta_sma(amount_0, 60)-1',
        'ta_sma(amount_0, 50)/ta_sma(amount_0, 100)-1',
        'ta_sma(turn_0, 10)/ta_sma(turn_0, 20)-1',
        'ta_sma(turn_0, 20)/ta_sma(turn_0, 30)-1',
        'ta_sma(turn_0, 30)/ta_sma(turn_0, 60)-1',
        'ta_sma(turn_0, 50)/ta_sma(turn_0, 100)-1',
    ]
    # Price shape, lagged closes, intraday range and dispersion features.
    price_block = [
        'high_0/low_0-1', 'close_0/open_0-1',
        'shift(close_0,1)/close_0-1', 'shift(close_0,2)/close_0-1',
        'shift(close_0,3)/close_0-1', 'shift(close_0,4)/close_0-1',
        'shift(close_0,5)/close_0-1', 'shift(close_0,10)/close_0-1',
        'shift(close_0,20)/close_0-1',
        'ta_sma(high_0-low_0, 5)/ta_sma(high_0-low_0, 20)-1',
        'ta_sma(high_0-low_0, 10)/ta_sma(high_0-low_0, 20)-1',
        'ta_sma(high_0-low_0, 20)/ta_sma(high_0-low_0, 30)-1',
        'ta_sma(high_0-low_0, 30)/ta_sma(high_0-low_0, 60)-1',
        'ta_sma(high_0-low_0, 50)/ta_sma(high_0-low_0, 100)-1',
        'std(close_0,5)/std(close_0,20)-1',
        'std(close_0,10)/std(close_0,20)-1',
        'std(close_0,20)/std(close_0,30)-1',
        'std(close_0,30)/std(close_0,60)-1',
        'std(close_0,50)/std(close_0,100)-1',
        'shift(mf_net_amount_s_0,3)', 'shift(mf_net_amount_m_0,3)',
        'shift(mf_net_amount_l_0,3)',
    ]
    # Concatenation order matches the original flat list exactly.
    feature_columns = (rank_block + momentum_block + technical_block
                       + sma_block + price_block)
    pickled = DataSource.write_pickle(feature_columns)
    return Outputs(data_1=pickled, data_2=None, data_3=None)
# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。
def m21_post_run_bigquant_run(outputs):
    """Post-run hook for m21: pass the module outputs through unchanged."""
    return outputs
# 特征提取与转换
# Feature selection and transformation
def m4_run_bigquant_run(input_1, input_2, input_3):
    """Train the stacked GBDT -> one-hot leaf encoding -> logistic-regression model.

    input_1: DataSource holding the labelled training DataFrame.
    input_2: DataSource holding the pickled list of feature-column names.
    input_3: unused.
    Returns Outputs with data_1 = pickled dict {'grd', 'grd_enc', 'grd_lm'}.
    """
    import numpy as np
    # Fix the RNG so train_test_split below is reproducible across runs.
    np.random.seed(10)
    # Only the imports actually used are kept (the original pulled in several
    # unused estimators and plotting modules).
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.preprocessing import OneHotEncoder
    from sklearn.model_selection import train_test_split

    data = input_1.read_df()                 # full labelled training set
    X = data[input_2.read_pickle()]          # feature matrix (declared columns)
    # FIX: pass a 1-D label vector; the original wrapped it in a (n, 1)
    # DataFrame, which makes sklearn emit DataConversionWarning and ravel it
    # implicitly.
    y = np.asarray(data['label']).ravel()

    n_estimator = 5
    # Train the tree ensemble and the LR head on disjoint subsets to avoid
    # overfitting the leaf encoding (standard GBDT+LR stacking practice).
    X_train, X_train_lr, y_train, y_train_lr = train_test_split(
        X, y, test_size=0.7)

    grd = GradientBoostingClassifier(n_estimators=n_estimator)
    grd_enc = OneHotEncoder(categories='auto')
    grd_lm = LogisticRegression(solver='lbfgs', max_iter=1000)
    grd.fit(X_train, y_train)
    # grd.apply returns (n_samples, n_estimators, 1) leaf indices; drop the
    # trailing axis before one-hot encoding.
    grd_enc.fit(grd.apply(X_train)[:, :, 0])
    grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)

    Model = {'grd': grd, 'grd_enc': grd_enc, 'grd_lm': grd_lm}
    T = DataSource.write_pickle(Model)
    return Outputs(data_1=T, data_2=None, data_3=None)
# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。
def m4_post_run_bigquant_run(outputs):
    """Post-run hook for m4: pass the module outputs through unchanged."""
    return outputs
# 输入的是模型,输出的是预测部分
def m5_run_bigquant_run(input_1, input_2, input_3):
    """Score a dataset with the trained GBDT+LR model.

    input_1: DataSource with the pickled model dict from m4.
    input_2: DataSource with the DataFrame to score (must carry 'date' and
             'instrument' columns).
    input_3: DataSource with the pickled feature-column list.
    Returns Outputs with data_1 = DataFrame(prediction, date, instrument).
    """
    # Unpack the three fitted estimators from the model dict.
    model = input_1.read_pickle()
    gbdt = model['grd']
    leaf_encoder = model['grd_enc']
    lr_head = model['grd_lm']

    frame = input_2.read_df()
    features = frame[input_3.read_pickle()]
    # Map samples to one-hot leaf indicators, then take P(label == 1).
    leaf_codes = leaf_encoder.transform(gbdt.apply(features)[:, :, 0])
    proba_up = lr_head.predict_proba(leaf_codes)[:, 1]

    # NOTE(review): the date/instrument assignment aligns by index — assumes
    # frame has a default RangeIndex; confirm upstream.
    result = pd.DataFrame({'prediction': proba_up})
    result['date'] = frame['date']
    result['instrument'] = frame['instrument']
    return Outputs(data_1=DataSource.write_df(result), data_2=None, data_3=None)
# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。
def m5_post_run_bigquant_run(outputs):
    """Post-run hook for m5: pass the module outputs through unchanged."""
    return outputs
# Python 代码入口函数,input_1/2/3 对应三个输入端,data_1/2/3 对应三个输出端
def m6_run_bigquant_run(input_1, input_2, input_3):
    """Evaluate binary predictions against realised labels.

    input_1: DataSource with the prediction DataFrame (prediction/date/instrument).
    input_2: DataSource with the labelled DataFrame ('label' column).
    Prints the normalised confusion matrix, accuracy, precision and recall for
    both classes, plots a heatmap and a ROC curve, and returns the pickled
    Real/Pred arrays on data_1/data_2.
    """
    import matplotlib.pyplot as plt
    import seaborn as sn
    from sklearn.metrics import confusion_matrix, roc_curve

    Data_pre = input_1.read_df()
    Data_real = input_2.read_df()
    Data = pd.merge(Data_pre, Data_real, how='inner', on=['date', 'instrument'])
    # Hard decision at probability 0.5.
    Pred = np.where(Data['prediction'] > 0.5, 1, 0)
    Real = np.array(Data['label'])

    # FIX: force a 2x2 matrix even when one class is absent in the sample
    # (without labels=, confusion_matrix would collapse to 1x1 and the cell
    # indexing below would break).
    cm = confusion_matrix(Real, Pred, labels=[0, 1])
    # FIX: guard the row-normalisation against zero row sums.
    row_sums = cm.sum(axis=1)[:, np.newaxis]
    cm_normalized = cm.astype('float') / np.where(row_sums == 0, 1, row_sums)
    print(cm_normalized)
    df_cm = pd.DataFrame(cm_normalized)
    plt.figure(figsize=(15, 10))
    sn.heatmap(df_cm, annot=True)

    # cm layout for labels=[0, 1]: [[TN, FP], [FN, TP]].
    L00 = int(cm[0, 0])  # predicted down, was down (TN)
    L10 = int(cm[0, 1])  # predicted up, was down  (FP)
    L01 = int(cm[1, 0])  # predicted down, was up  (FN)
    L11 = int(cm[1, 1])  # predicted up, was up    (TP)

    def _ratio(num, den):
        # FIX: the original divided unconditionally and raised
        # ZeroDivisionError when a denominator was empty.
        return num / den if den else float('nan')

    print('准确率')
    print(_ratio(L11 + L00, L11 + L00 + L10 + L01))
    print('预测涨结果涨')
    print(L11)
    print('预测跌结果跌')
    print(L00)
    print('预测涨结果跌')
    print(L10)
    print('预测跌结果涨')
    print(L01)
    print('\n')
    print('查准率\n')
    print('预测涨的准确率\n')
    print(_ratio(L11, L11 + L10))
    print('预测跌的准确率\n')
    print(_ratio(L00, L01 + L00))
    print('\n')
    print('查全率\n')
    print('涨的股票中预测准确率\n')
    print(_ratio(L11, L11 + L01))
    print('跌的股票中预测准确率\n')
    print(_ratio(L00, L00 + L10))

    # NOTE(review): the ROC curve is built from the hard 0/1 decisions, not
    # the raw probabilities, so it has a single operating point — consider
    # feeding Data['prediction'] instead.
    fpr_grd_lm, tpr_grd_lm, _ = roc_curve(Real, Pred)
    plt.figure()
    plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.show()
    return Outputs(data_1=DataSource.write_pickle(Real), data_2=DataSource.write_pickle(Pred), data_3=None)
# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。
def m6_post_run_bigquant_run(outputs):
    """Post-run hook for m6: pass the module outputs through unchanged."""
    return outputs
# 回测引擎:每日数据处理函数,每天执行一次
def m19_handle_data_bigquant_run(context, data):
    """Daily rebalance: sell held names whose prediction falls below 0.42,
    then spread the day's buy budget equally over names predicted above 0.66.
    """
    # Today's slice of the prediction table.
    ranker_prediction = context.ranker_prediction[
        context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]

    # 1. Cash allocation: average holding period is hold_days, so budget
    #    roughly 1/hold_days of the portfolio per day.  During the initial
    #    ramp-up use exactly the average; afterwards allow up to 1.5x of it.
    is_staging = context.trading_day_index < context.options['hold_days']
    cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
    cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
    positions = {e.symbol: p.amount * p.last_sale_price
                 for e, p in context.perf_tracker.position_tracker.positions.items()}

    # 2. Sell orders: only after the ramp-up; liquidate held names whose
    #    predicted probability dropped below the 0.42 exit threshold.
    if not is_staging and cash_for_sell > 0:
        equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
        instruments = [m for m in list(ranker_prediction[ranker_prediction.prediction < 0.42].instrument) if m in equities]
        for instrument in instruments:
            context.order_target(context.symbol(instrument), 0)
            cash_for_sell -= positions[instrument]
            if cash_for_sell <= 0:
                break

    # 3. Buy orders: equal-weight every name above the 0.66 entry threshold.
    buy_instruments = list(ranker_prediction[ranker_prediction.prediction > 0.66].instrument)
    if not buy_instruments:
        # BUG FIX: the original computed 1/len(buy_instruments) and raised
        # ZeroDivisionError on days with no prediction above the threshold.
        return
    buy_cash_weights = [1 / len(buy_instruments)] * len(buy_instruments)
    max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
    for i, instrument in enumerate(buy_instruments):
        cash = cash_for_buy * buy_cash_weights[i]
        if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Cap per-name exposure at max_cash_per_instrument.
            cash = max_cash_per_instrument - positions.get(instrument, 0)
        if cash > 0:
            context.order_value(context.symbol(instrument), cash)
# 回测引擎:准备数据,只执行一次
def m19_prepare_bigquant_run(context):
    """Backtest data-preparation hook (runs once); intentionally a no-op."""
    pass
# 回测引擎:初始化函数,只执行一次
def m19_initialize_bigquant_run(context):
    """Backtest initialisation (runs once): load predictions, set commission
    and position-sizing options on the context."""
    # Load the prediction DataFrame passed in through options['data'].
    context.ranker_prediction = context.options['data'].read_df()
    # Override the engine's default commission model.
    context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
    # Number of top-ranked stocks to buy.
    stock_count = 5
    # Log-decaying weights so higher-ranked names get more capital,
    # e.g. [0.339160, 0.213986, 0.169580, ...].
    # NOTE(review): handle_data sizes buys by a probability threshold and
    # never reads stock_weights — confirm whether this is dead config.
    context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of portfolio value allowed in a single stock.
    context.max_cash_per_instrument = 0.2
    # Average holding period in trading days; drives the daily cash budget.
    context.options['hold_days'] = 5
# Training universe: all CN A-share instruments, 2010-01-01 to 2016-01-01.
m1 = M.instruments.v2(
    start_date='2010-01-01',
    end_date='2016-01-01',
    market='CN_STOCK_A',
    instrument_list='',
    max_count=0
)
# Label the training universe: label = 1 when selling at the close 5 days out
# vs buying at tomorrow's open returns more than 0.1%, else 0; one-bar
# limit-locked days (high == low next day) are set to NaN and dropped.
# (label_expr is runtime data evaluated by the platform — kept verbatim.)
m2 = M.advanced_auto_labeler.v2(
    instruments=m1.data,
    label_expr="""# #号开始的表示注释
# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
# 1. 可用数据字段见 https://bigquant.com/docs/data_history_data.html
# 添加benchmark_前缀,可使用对应的benchmark数据
# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/big_expr.html>`_
# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格), 五日收益率为正数
where(shift(close, -5) / shift(open, -1)>1.001,1,0)
# 极值处理:用1%和99%分位的值做clip
#clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# 将分数映射到分类,这里使用20个分类
#all_wbins(label, 20)
# 过滤掉一字涨跌停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
where(shift(high, -1) == shift(low, -1), NaN, label) # 一开盘就到了10%那里,既是high也是low
""",
    start_date='',
    end_date='',
    benchmark='000300.SHA',
    drop_na_label=True,
    cast_label_int=True
)
# Declare every base/derived feature expression used downstream; evaluated by
# the feature extractors m15/m16 (training) and m17/m18 (prediction).
# (The features string is runtime data — kept verbatim.)
m3 = M.input_features.v1(
    features="""# #号开始的表示注释
# 多个特征,每行一个,可以包含基础特征和衍生特征
return_5-1
return_10-1
return_20-1
avg_amount_0/avg_amount_5-1
avg_amount_5/avg_amount_20-1
rank_avg_amount_0-rank_avg_amount_5
rank_avg_amount_5-rank_avg_amount_10
rank_return_0-rank_return_5
rank_return_5-rank_return_10
beta_csi300_30_0/10
beta_csi300_60_0/10
swing_volatility_5_0/swing_volatility_30_0-1
swing_volatility_30_0/swing_volatility_60_0-1
ta_atr_14_0/ta_atr_28_0-1
ta_sma_5_0/ta_sma_20_0-1
ta_sma_10_0/ta_sma_20_0-1
ta_sma_20_0/ta_sma_30_0-1
ta_sma_30_0/ta_sma_60_0-1
ta_rsi_14_0/100
ta_rsi_28_0/100
ta_cci_14_0/500
ta_cci_28_0/500
beta_industry_30_0/10
beta_industry_60_0/10
ta_sma(amount_0, 10)/ta_sma(amount_0, 20)-1
ta_sma(amount_0, 20)/ta_sma(amount_0, 30)-1
ta_sma(amount_0, 30)/ta_sma(amount_0, 60)-1
ta_sma(amount_0, 50)/ta_sma(amount_0, 100)-1
ta_sma(turn_0, 10)/ta_sma(turn_0, 20)-1
ta_sma(turn_0, 20)/ta_sma(turn_0, 30)-1
ta_sma(turn_0, 30)/ta_sma(turn_0, 60)-1
ta_sma(turn_0, 50)/ta_sma(turn_0, 100)-1
high_0/low_0-1
close_0/open_0-1
shift(close_0,1)/close_0-1
shift(close_0,2)/close_0-1
shift(close_0,3)/close_0-1
shift(close_0,4)/close_0-1
shift(close_0,5)/close_0-1
shift(close_0,10)/close_0-1
shift(close_0,20)/close_0-1
ta_sma(high_0-low_0, 5)/ta_sma(high_0-low_0, 20)-1
ta_sma(high_0-low_0, 10)/ta_sma(high_0-low_0, 20)-1
ta_sma(high_0-low_0, 20)/ta_sma(high_0-low_0, 30)-1
ta_sma(high_0-low_0, 30)/ta_sma(high_0-low_0, 60)-1
ta_sma(high_0-low_0, 50)/ta_sma(high_0-low_0, 100)-1
rank_avg_amount_5
rank_avg_turn_5
rank_volatility_5_0
rank_swing_volatility_5_0
rank_avg_mf_net_amount_5
rank_beta_industry_5_0
rank_return_5
rank_return_2
std(close_0,5)/std(close_0,20)-1
std(close_0,10)/std(close_0,20)-1
std(close_0,20)/std(close_0,30)-1
std(close_0,30)/std(close_0,60)-1
std(close_0,50)/std(close_0,100)-1
mf_net_amount_1
shift(mf_net_amount_s_0,3)
shift(mf_net_amount_m_0,3)
shift(mf_net_amount_l_0,3)"""
)
# --- Training data pipeline -------------------------------------------------
# Extract base features for the training universe.
m15 = M.general_feature_extractor.v7(
    instruments=m1.data,
    features=m3.data,
    start_date='',
    end_date='',
    before_start_days=0
)
# Compute the derived (expression) features; drop rows with NaNs and any
# columns not listed in m3.
m16 = M.derived_feature_extractor.v3(
    input_data=m15.data,
    features=m3.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=True,
    remove_extra_columns=True
)
# Join labels (m2) with features (m16) on (date, instrument).
m7 = M.join.v3(
    data1=m2.data,
    data2=m16.data,
    on='date,instrument',
    how='inner',
    sort=False
)
# Final NaN sweep; m13.data feeds the training module m4.
m13 = M.dropnan.v1(
    input_data=m7.data
)
# --- Prediction data pipeline -----------------------------------------------
# Prediction universe: 2016-2017 in backtest, or the live trading date when
# run in live mode (T.live_run_param substitutes at run time).
m9 = M.instruments.v2(
    start_date=T.live_run_param('trading_date', '2016-01-01'),
    end_date=T.live_run_param('trading_date', '2017-01-01'),
    market='CN_STOCK_A',
    instrument_list='',
    max_count=0
)
# Extract base features for the prediction universe.
m17 = M.general_feature_extractor.v7(
    instruments=m9.data,
    features=m3.data,
    start_date='',
    end_date='',
    before_start_days=0
)
# Derived features; note drop_na/remove_extra_columns differ from the
# training pipeline (m16) — NaN rows are kept here and removed by m14.
m18 = M.derived_feature_extractor.v3(
    input_data=m17.data,
    features=m3.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=False,
    remove_extra_columns=False
)
# Drop NaN rows; m14.data is the frame scored by m5.
m14 = M.dropnan.v1(
    input_data=m18.data
)
# Realised labels for the prediction period, used only to evaluate the model
# in m6.  NOTE(review): the threshold here is >1 while training (m2) uses
# >1.001 — confirm the asymmetry is intentional.
# (label_expr is runtime data — kept verbatim.)
m8 = M.advanced_auto_labeler.v2(
    instruments=m9.data,
    label_expr="""# #号开始的表示注释
# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
# 1. 可用数据字段见 https://bigquant.com/docs/data_history_data.html
# 添加benchmark_前缀,可使用对应的benchmark数据
# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/big_expr.html>`_
# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
where(shift(close, -5) / shift(open, -1)>1,1,0)
# 极值处理:用1%和99%分位的值做clip
#clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# 将分数映射到分类,这里使用20个分类
#all_wbins(label, 20)
# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
    start_date='',
    end_date='',
    benchmark='000300.SHA',
    drop_na_label=True,
    cast_label_int=True
)
# Drop NaN rows; m10.data is the ground truth consumed by m6.
m10 = M.dropnan.v1(
    input_data=m8.data
)
# Backtest universe for the trade module (same 2016-2017 window, but fixed
# dates rather than live-run parameters).
m11 = M.instruments.v2(
    start_date='2016-01-01',
    end_date='2017-01-01',
    market='CN_STOCK_A',
    instrument_list='',
    max_count=0
)
# --- Wire the custom Python steps into the graph as cached modules ----------
# m21: emit the feature-column list.
m21 = M.cached.v3(
    run=m21_run_bigquant_run,
    post_run=m21_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)
# m4: train GBDT + one-hot + LR on the training data (m13) and column list.
m4 = M.cached.v3(
    input_1=m13.data,
    input_2=m21.data_1,
    run=m4_run_bigquant_run,
    post_run=m4_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)
# m5: score the prediction data (m14) with the trained model.
m5 = M.cached.v3(
    input_1=m4.data_1,
    input_2=m14.data,
    input_3=m21.data_1,
    run=m5_run_bigquant_run,
    post_run=m5_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)
# m6: evaluate predictions (m5) against realised labels (m10);
# m_cached=False so the diagnostics re-run every time.
m6 = M.cached.v3(
    input_1=m5.data_1,
    input_2=m10.data,
    run=m6_run_bigquant_run,
    post_run=m6_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports='',
    m_cached=False
)
# Backtest: daily engine driven by m5's predictions; buys at the open, sells
# at the close, 2.5% per-bar volume cap, 1,000,000 starting capital,
# post-adjusted prices, CSI 300 benchmark.
m19 = M.trade.v4(
    instruments=m11.data,
    options_data=m5.data_1,
    start_date='',
    end_date='',
    handle_data=m19_handle_data_bigquant_run,
    prepare=m19_prepare_bigquant_run,
    initialize=m19_initialize_bigquant_run,
    volume_limit=0.025,
    order_price_field_buy='open',
    order_price_field_sell='close',
    capital_base=1000000,
    auto_cancel_non_tradable_orders=True,
    data_frequency='daily',
    price_type='后复权',
    product_type='股票',
    plot_charts=True,
    backtest_only=False,
    benchmark='000300.SHA'
)