Factors: sample factors (7)
Factors standardized: yes
Label: 5-day forward return (not discretized)
Algorithm: LSTM
Problem type: regression
Training set: 2010-2016
Test set: 2016-2019
Stock selection: sort by predicted value in descending order and buy the top names
Number of holdings: 30
Holding period: 5 days
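The label built by the advanced_auto_labeler module below (m2) is the forward return from buying at the next day's open and selling at the close five trading days later, clipped at the 1%/99% quantiles and set to NaN on one-tick limit days. A minimal pandas sketch of the same idea for a single instrument (a hypothetical DataFrame df with open/high/low/close columns; in the strategy the quantile clip is computed over the whole sample by the expression engine, not per instrument):

import numpy as np
import pandas as pd

def make_label(df: pd.DataFrame) -> pd.Series:
    # forward return: close 5 days ahead over the next day's open, minus one
    label = df['close'].shift(-5) / df['open'].shift(-1) - 1
    # clip extreme values at the 1% and 99% quantiles
    label = label.clip(label.quantile(0.01), label.quantile(0.99))
    # drop days whose next bar is a one-tick (limit) bar and therefore cannot be bought
    return label.where(df['high'].shift(-1) != df['low'].shift(-1), np.nan)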
# This code was auto-generated by the visual strategy environment on 2019-08-28 11:39
# This code cell can only be edited in visual mode. You can also copy the code into a new code cell or strategy and then modify it.
# Python entry function: input_1/2/3 map to the three input ports, data_1/2/3 to the three output ports
def m4_run_bigquant_run(input_1, input_2, input_3):
    # Sample code follows; write your code here
df = input_1.read_pickle()
feature_len = len(input_2.read_pickle())
df['x'] = df['x'].reshape(df['x'].shape[0], int(feature_len), int(df['x'].shape[1]/feature_len))
data_1 = DataSource.write_pickle(df)
return Outputs(data_1=data_1)
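# Note: dl_convert_to_bin (m26/m27 below) emits each sample as a flattened rolling window of
# window_size * n_features values, i.e. 5 * 7 = 35 columns here. The reshape above turns x from
# (n_samples, 35) into (n_samples, 7, 5), which is what the '7,5' input layer (m6) expects;
# which of the two trailing axes is the time axis depends on how dl_convert_to_bin lays out the
# flattened window.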
# Optional post-processing function. Its input is the main function's output; you can transform the data here or return a friendlier outputs format. The output of this function is not cached.
def m4_post_run_bigquant_run(outputs):
return outputs
# Python entry function: input_1/2/3 map to the three input ports, data_1/2/3 to the three output ports
def m8_run_bigquant_run(input_1, input_2, input_3):
    # Sample code follows; write your code here
df = input_1.read_pickle()
feature_len = len(input_2.read_pickle())
df['x'] = df['x'].reshape(df['x'].shape[0], int(feature_len), int(df['x'].shape[1]/feature_len))
data_1 = DataSource.write_pickle(df)
return Outputs(data_1=data_1)
# Optional post-processing function. Its input is the main function's output; you can transform the data here or return a friendlier outputs format. The output of this function is not cached.
def m8_post_run_bigquant_run(outputs):
return outputs
# Python entry function: input_1/2/3 map to the three input ports, data_1/2/3 to the three output ports
def m24_run_bigquant_run(input_1, input_2, input_3):
    # Sample code follows; write your code here
pred_label = input_1.read_pickle()
df = input_2.read_df()
df = pd.DataFrame({'pred_label':pred_label[:,0], 'instrument':df.instrument, 'date':df.date})
df.sort_values(['date','pred_label'],inplace=True, ascending=[True,False])
return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)
# Optional post-processing function. Its input is the main function's output; you can transform the data here or return a friendlier outputs format. The output of this function is not cached.
def m24_post_run_bigquant_run(outputs):
return outputs
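# The backtest module (m19) receives this table through options_data. Because rows are sorted by
# date and then by pred_label in descending order, taking the first stock_count rows of each day
# implements the "buy by descending predicted value" rule described in the header.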
# Backtest engine: initialization function, executed only once
def m19_initialize_bigquant_run(context):
    # Load the prediction data
context.ranker_prediction = context.options['data'].read_df()
    # Default commission and slippage are already set by the system; to change the commission, use the call below
context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
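    # (Assumed PerOrder semantics: buy_cost/sell_cost are per-order commission rates, with the
    # higher sell_cost presumably covering stamp duty on sells, and min_cost is the minimum
    # commission per order in CNY.)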
    # The prediction data is passed in through options and loaded into memory as a DataFrame via read_df
    # Number of stocks to buy: here the top 30 names of the predicted ranking are bought
    stock_count = 30
    # Per-stock weights: proportional to 1/log(rank + 2) and normalized, so higher-ranked stocks get a somewhat larger share of the cash
context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of total capital a single stock may occupy
context.max_cash_per_instrument = 0.9
context.options['hold_days'] = 5
# Backtest engine: daily data-handling function, executed once per trading day
def m19_handle_data_bigquant_run(context, data):
    # Filter by date to get today's prediction data
ranker_prediction = context.ranker_prediction[
context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]
    # 1. Cash allocation
    # The average holding period is hold_days and stocks are bought every day, so roughly 1/hold_days of the capital is expected to be used per day
    # In practice buys do not fill perfectly, so during the first hold_days days an equal share of cash is used each day; after that, as much of the remaining cash as possible is used (capped here at 1.5x the equal share)
    is_staging = context.trading_day_index < context.options['hold_days'] # still in the position-building period (first hold_days days)?
cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
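    # Worked example with the settings used below (capital_base=1,000,000 and hold_days=5):
    # cash_avg starts at 1,000,000 / 5 = 200,000, so at most 200,000 is spent per day while
    # staging and up to 1.5 * 200,000 = 300,000 of the remaining cash afterwards.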
positions = {e.symbol: p.amount * p.last_sale_price
for e, p in context.perf_tracker.position_tracker.positions.items()}
    # 2. Generate sell orders: selling only starts after the first hold_days days; current holdings at the bottom of the model's predicted ranking are eliminated first
if not is_staging and cash_for_sell > 0:
equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
# print('rank order for sell %s' % instruments)
for instrument in instruments:
context.order_target(context.symbol(instrument), 0)
cash_for_sell -= positions[instrument]
if cash_for_sell <= 0:
break
    # 3. Generate buy orders: buy the top stock_count stocks in the model's predicted ranking
buy_cash_weights = context.stock_weights
buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
for i, instrument in enumerate(buy_instruments):
cash = cash_for_buy * buy_cash_weights[i]
if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Make sure the position in any single stock does not exceed the per-stock cash cap
cash = max_cash_per_instrument - positions.get(instrument, 0)
if cash > 0:
context.order_value(context.symbol(instrument), cash)
# Backtest engine: data preparation, executed only once
def m19_prepare_bigquant_run(context):
pass
m1 = M.instruments.v2(
start_date='2010-01-01',
end_date='2016-01-01',
market='CN_STOCK_A',
instrument_list=' ',
max_count=0
)
m2 = M.advanced_auto_labeler.v2(
instruments=m1.data,
    label_expr="""# Lines starting with # are comments
# 0. One expression per line, evaluated in order; from the second line on, the label field can be referenced
# 1. Available data fields: https://bigquant.com/docs/data_history_data.html
#    Add the benchmark_ prefix to a field to use the corresponding benchmark data
# 2. Available operators and functions: see the `expression engine <https://bigquant.com/docs/big_expr.html>`_
# Compute the return: close 5 days ahead (used as the sell price) over the next day's open (used as the buy price), minus one
shift(close, -5) / shift(open, -1)-1
# Outlier handling: clip at the 1% and 99% quantiles of the label
clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# Filter out one-tick limit-up days where the stock cannot be bought (set label to NaN; NaN labels are ignored in later processing and training)
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
start_date='',
end_date='',
benchmark='000300.SHA',
drop_na_label=True,
cast_label_int=False
)
m13 = M.standardlize.v8(
input_1=m2.data,
columns_input='label'
)
m3 = M.input_features.v1(
features="""close_0/mean(close_0,5)
close_0/mean(close_0,10)
close_0/mean(close_0,20)
close_0/open_0
open_0/mean(close_0,5)
open_0/mean(close_0,10)
open_0/mean(close_0,20)"""
)
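# The 7 factors above are simple price ratios: the close relative to its 5/10/20-day moving
# averages, the close relative to the open, and the open relative to the same 5/10/20-day
# moving averages of the close.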
m15 = M.general_feature_extractor.v7(
instruments=m1.data,
features=m3.data,
start_date='',
end_date='',
before_start_days=30
)
m16 = M.derived_feature_extractor.v3(
input_data=m15.data,
features=m3.data,
date_col='date',
instrument_col='instrument',
drop_na=True,
remove_extra_columns=False
)
m14 = M.standardlize.v8(
input_1=m16.data,
input_2=m3.data,
columns_input='[]'
)
m7 = M.join.v3(
data1=m13.data,
data2=m14.data,
on='date,instrument',
how='inner',
sort=False
)
m26 = M.dl_convert_to_bin.v2(
input_data=m7.data,
features=m3.data,
window_size=5,
feature_clip=5,
flatten=True,
window_along_col='instrument'
)
m4 = M.cached.v3(
input_1=m26.data,
input_2=m3.data,
run=m4_run_bigquant_run,
post_run=m4_post_run_bigquant_run,
input_ports='',
params='{}',
output_ports=''
)
m9 = M.instruments.v2(
start_date=T.live_run_param('trading_date', '2016-01-01'),
end_date=T.live_run_param('trading_date', '2019-04-16'),
market='CN_STOCK_A',
instrument_list='',
max_count=0
)
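# T.live_run_param('trading_date', default) presumably returns the default date in research and
# backtest runs and the current trading date in live/paper trading, so the same graph can be
# reused for live trading without editing the dates.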
m17 = M.general_feature_extractor.v7(
instruments=m9.data,
features=m3.data,
start_date='',
end_date='',
before_start_days=30
)
m18 = M.derived_feature_extractor.v3(
input_data=m17.data,
features=m3.data,
date_col='date',
instrument_col='instrument',
drop_na=True,
remove_extra_columns=False
)
m25 = M.standardlize.v8(
input_1=m18.data,
input_2=m3.data,
columns_input='[]'
)
m27 = M.dl_convert_to_bin.v2(
input_data=m25.data,
features=m3.data,
window_size=5,
feature_clip=5,
flatten=True,
window_along_col='instrument'
)
m8 = M.cached.v3(
input_1=m27.data,
input_2=m3.data,
run=m8_run_bigquant_run,
post_run=m8_post_run_bigquant_run,
input_ports='',
params='{}',
output_ports=''
)
m6 = M.dl_layer_input.v1(
shape='7,5',
batch_shape='',
dtype='float32',
sparse=False,
name=''
)
m10 = M.dl_layer_lstm.v1(
inputs=m6.data,
units=32,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='Orthogonal',
bias_initializer='Zeros',
unit_forget_bias=True,
kernel_regularizer='None',
kernel_regularizer_l1=0,
kernel_regularizer_l2=0,
recurrent_regularizer='None',
recurrent_regularizer_l1=0,
recurrent_regularizer_l2=0,
bias_regularizer='None',
bias_regularizer_l1=0,
bias_regularizer_l2=0,
activity_regularizer='None',
activity_regularizer_l1=0,
activity_regularizer_l2=0,
kernel_constraint='None',
recurrent_constraint='None',
bias_constraint='None',
dropout=0,
recurrent_dropout=0,
return_sequences=False,
implementation='0',
name=''
)
m12 = M.dl_layer_dropout.v1(
inputs=m10.data,
rate=0.2,
noise_shape='',
name=''
)
m20 = M.dl_layer_dense.v1(
inputs=m12.data,
units=30,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='Zeros',
kernel_regularizer='None',
kernel_regularizer_l1=0,
kernel_regularizer_l2=0,
bias_regularizer='None',
bias_regularizer_l1=0,
bias_regularizer_l2=0,
activity_regularizer='None',
activity_regularizer_l1=0,
activity_regularizer_l2=0,
kernel_constraint='None',
bias_constraint='None',
name=''
)
m21 = M.dl_layer_dropout.v1(
inputs=m20.data,
rate=0.2,
noise_shape='',
name=''
)
m22 = M.dl_layer_dense.v1(
inputs=m21.data,
units=1,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='Zeros',
kernel_regularizer='None',
kernel_regularizer_l1=0,
kernel_regularizer_l2=0,
bias_regularizer='None',
bias_regularizer_l1=0,
bias_regularizer_l2=0,
activity_regularizer='None',
activity_regularizer_l1=0,
activity_regularizer_l2=0,
kernel_constraint='None',
bias_constraint='None',
name=''
)
m34 = M.dl_model_init.v1(
inputs=m6.data,
outputs=m22.data
)
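# For reference, the m6 -> m10 -> m12 -> m20 -> m21 -> m22 graph assembled by dl_model_init is
# roughly equivalent to the following standalone Keras model (an illustrative sketch, assuming
# tf.keras; in the strategy the construction is handled by the dl_layer_* modules):
#
#   inputs = keras.Input(shape=(7, 5))
#   x = keras.layers.LSTM(32, activation='tanh', recurrent_activation='hard_sigmoid')(inputs)
#   x = keras.layers.Dropout(0.2)(x)
#   x = keras.layers.Dense(30, activation='tanh')(x)
#   x = keras.layers.Dropout(0.2)(x)
#   outputs = keras.layers.Dense(1, activation='tanh')(x)
#   model = keras.Model(inputs, outputs)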
m5 = M.dl_model_train.v1(
input_model=m34.data,
training_data=m4.data_1,
optimizer='RMSprop',
loss='mean_squared_error',
metrics='mae',
batch_size=256,
epochs=5,
n_gpus=0,
verbose='2:每个epoch输出一行记录'
)
m11 = M.dl_model_predict.v1(
trained_model=m5.data,
input_data=m8.data_1,
batch_size=1024,
n_gpus=0,
verbose='2:每个epoch输出一行记录'
)
m24 = M.cached.v3(
input_1=m11.data,
input_2=m18.data,
run=m24_run_bigquant_run,
post_run=m24_post_run_bigquant_run,
input_ports='',
params='{}',
output_ports=''
)
m19 = M.trade.v4(
instruments=m9.data,
options_data=m24.data_1,
start_date='',
end_date='',
initialize=m19_initialize_bigquant_run,
handle_data=m19_handle_data_bigquant_run,
prepare=m19_prepare_bigquant_run,
volume_limit=0.025,
order_price_field_buy='open',
order_price_field_sell='close',
capital_base=1000000,
auto_cancel_non_tradable_orders=True,
data_frequency='daily',
price_type='后复权',
product_type='股票',
plot_charts=True,
backtest_only=False,
benchmark='000300.SHA'
)
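# Note that order_price_field_buy='open' and order_price_field_sell='close' mirror the label
# definition (buy at the next open, sell at a later close), keeping the backtest's execution
# assumptions consistent with what the model was trained to predict.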
[2019-08-28 11:00:21.346579] INFO: bigquant: instruments.v2 开始运行..
[2019-08-28 11:00:21.428587] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.432077] INFO: bigquant: instruments.v2 运行完成[0.085501s].
[2019-08-28 11:00:21.440023] INFO: bigquant: advanced_auto_labeler.v2 开始运行..
[2019-08-28 11:00:21.480780] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.484993] INFO: bigquant: advanced_auto_labeler.v2 运行完成[0.044964s].
[2019-08-28 11:00:21.491707] INFO: bigquant: standardlize.v8 开始运行..
[2019-08-28 11:00:21.532872] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.534686] INFO: bigquant: standardlize.v8 运行完成[0.042985s].
[2019-08-28 11:00:21.538881] INFO: bigquant: input_features.v1 开始运行..
[2019-08-28 11:00:21.578926] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.580933] INFO: bigquant: input_features.v1 运行完成[0.042048s].
[2019-08-28 11:00:21.637279] INFO: bigquant: general_feature_extractor.v7 开始运行..
[2019-08-28 11:00:21.702575] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.705561] INFO: bigquant: general_feature_extractor.v7 运行完成[0.068257s].
[2019-08-28 11:00:21.711850] INFO: bigquant: derived_feature_extractor.v3 开始运行..
[2019-08-28 11:00:21.747658] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.751126] INFO: bigquant: derived_feature_extractor.v3 运行完成[0.039255s].
[2019-08-28 11:00:21.754221] INFO: bigquant: standardlize.v8 开始运行..
[2019-08-28 11:00:21.790723] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.793050] INFO: bigquant: standardlize.v8 运行完成[0.038835s].
[2019-08-28 11:00:21.797890] INFO: bigquant: join.v3 开始运行..
[2019-08-28 11:00:21.830740] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.832920] INFO: bigquant: join.v3 运行完成[0.035016s].
[2019-08-28 11:00:21.879952] INFO: bigquant: dl_convert_to_bin.v2 开始运行..
[2019-08-28 11:00:21.909862] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.911830] INFO: bigquant: dl_convert_to_bin.v2 运行完成[0.031887s].
[2019-08-28 11:00:21.919036] INFO: bigquant: cached.v3 开始运行..
[2019-08-28 11:00:21.995684] INFO: bigquant: 命中缓存
[2019-08-28 11:00:21.998005] INFO: bigquant: cached.v3 运行完成[0.078961s].
[2019-08-28 11:00:22.001128] INFO: bigquant: instruments.v2 开始运行..
[2019-08-28 11:00:22.036353] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.038313] INFO: bigquant: instruments.v2 运行完成[0.037178s].
[2019-08-28 11:00:22.091440] INFO: bigquant: general_feature_extractor.v7 开始运行..
[2019-08-28 11:00:22.126390] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.128568] INFO: bigquant: general_feature_extractor.v7 运行完成[0.037134s].
[2019-08-28 11:00:22.132055] INFO: bigquant: derived_feature_extractor.v3 开始运行..
[2019-08-28 11:00:22.188452] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.190987] INFO: bigquant: derived_feature_extractor.v3 运行完成[0.058923s].
[2019-08-28 11:00:22.195284] INFO: bigquant: standardlize.v8 开始运行..
[2019-08-28 11:00:22.239004] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.241312] INFO: bigquant: standardlize.v8 运行完成[0.046023s].
[2019-08-28 11:00:22.274914] INFO: bigquant: dl_convert_to_bin.v2 开始运行..
[2019-08-28 11:00:22.319762] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.321784] INFO: bigquant: dl_convert_to_bin.v2 运行完成[0.046885s].
[2019-08-28 11:00:22.327212] INFO: bigquant: cached.v3 开始运行..
[2019-08-28 11:00:22.370828] INFO: bigquant: 命中缓存
[2019-08-28 11:00:22.373472] INFO: bigquant: cached.v3 运行完成[0.04624s].
[2019-08-28 11:00:25.215001] INFO: bigquant: cached.v3 开始运行..
[2019-08-28 11:00:25.295436] INFO: bigquant: cached.v3 运行完成[0.080419s].
[2019-08-28 11:00:25.308795] INFO: bigquant: dl_model_train.v1 开始运行..
[2019-08-28 11:00:33.791118] INFO: dl_model_train: 准备训练,训练样本个数:3164669,迭代次数:5
[2019-08-28 11:23:40.080937] INFO: dl_model_train: 训练结束,耗时:1386.29s
[2019-08-28 11:23:40.325318] INFO: bigquant: dl_model_train.v1 运行完成[1395.016536s].
[2019-08-28 11:23:40.330309] INFO: bigquant: dl_model_predict.v1 开始运行..
[2019-08-28 11:24:15.060138] INFO: bigquant: dl_model_predict.v1 运行完成[34.729801s].
[2019-08-28 11:24:15.069027] INFO: bigquant: cached.v3 开始运行..
[2019-08-28 11:24:23.557723] INFO: bigquant: cached.v3 运行完成[8.488691s].
[2019-08-28 11:24:23.640197] INFO: bigquant: backtest.v8 开始运行..
[2019-08-28 11:24:23.644775] INFO: bigquant: biglearning backtest:V8.2.9
[2019-08-28 11:24:23.647191] INFO: bigquant: product_type:stock by specified
[2019-08-28 11:24:23.957620] INFO: bigquant: cached.v2 开始运行..
[2019-08-28 11:24:23.991210] INFO: bigquant: 命中缓存
[2019-08-28 11:24:23.993740] INFO: bigquant: cached.v2 运行完成[0.036136s].
[2019-08-28 11:24:45.869263] INFO: algo: TradingAlgorithm V1.5.6
[2019-08-28 11:24:49.356870] INFO: algo: trading transform...
[2019-08-28 11:25:56.163951] INFO: Performance: Simulated 800 trading days out of 800.
[2019-08-28 11:25:56.166609] INFO: Performance: first open: 2016-01-04 09:30:00+00:00
[2019-08-28 11:25:56.169714] INFO: Performance: last close: 2019-04-16 15:00:00+00:00
[2019-08-28 11:26:19.027006] INFO: bigquant: backtest.v8 运行完成[115.386817s].