Factors: sample factors (7)
Factors standardized: yes
Label: 5-day forward return (standardized; no discretization)
Algorithm: DNN
Problem type: regression
Training set: 2010-2015
Test set: 2016-2019
Stock selection: rank by predicted value in descending order and buy from the top
Number of holdings: 20 (the strategy code below sets stock_count = 20)
Holding period: 5 days
Model structure
Input layer: 7 units (number of factors)
Dense layer: 256 units, ReLU activation
Dropout: 0.1
Dense layer: 128 units, ReLU activation
Dropout: 0.1
Dense layer: 1 unit, linear activation (prediction output)
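For reference, the sketch below is a minimal standalone tf.keras version of the architecture described above (assuming TensorFlow 2.x and NumPy; the strategy itself builds the equivalent network with the dl_layer_* / dl_model_* modules further down).

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

def build_model(n_factors=7):
    # 7 standardized factors in, one predicted 5-day forward return out
    inputs = keras.Input(shape=(n_factors,), dtype='float32')
    x = layers.Dense(256, activation='relu')(inputs)
    x = layers.Dropout(0.1)(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.1)(x)
    outputs = layers.Dense(1, activation='linear')(x)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
                  loss='mean_squared_error', metrics=['mse'])
    return model

# Quick smoke test on random data (shapes only; not the real factor data)
model = build_model()
model.fit(np.random.randn(256, 7), np.random.randn(256, 1), batch_size=64, epochs=1, verbose=2)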
from tensorflow.keras import optimizers
# This code was auto-generated by the visual strategy environment on 2021-08-05 17:08
# This code cell can only be edited in visual mode. You may also copy the code into a new code cell or strategy and modify it there.
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m10_run_bigquant_run(input_1, input_2, input_3):
    # Sample code follows; write your own code here.
    from sklearn.model_selection import train_test_split
    data = input_1.read()
    # Random 75%/25% train/validation split; pass random_state=... for a reproducible split
    x_train, x_val, y_train, y_val = train_test_split(data["x"], data['y'])
    data_1 = DataSource.write_pickle({'x': x_train, 'y': y_train})
    data_2 = DataSource.write_pickle({'x': x_val, 'y': y_val})
    return Outputs(data_1=data_1, data_2=data_2, data_3=None)
# Post-processing function (optional). Its input is the output of the main function; you can process the data further here or return a friendlier outputs format. The output of this function is not cached.
def m10_post_run_bigquant_run(outputs):
    return outputs
# User-defined custom layers must be registered in a dict, e.g.
# {
#   "MyLayer": MyLayer
# }
m5_custom_objects_bigquant_run = {
 
}
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m24_run_bigquant_run(input_1, input_2, input_3):
    # Sample code follows; write your own code here.
    pred_label = input_1.read_pickle()
    df = input_2.read_df()
    # Attach each prediction to its instrument/date, then rank within each day (highest prediction first)
    df = pd.DataFrame({'pred_label': pred_label[:, 0], 'instrument': df.instrument, 'date': df.date})
    df.sort_values(['date', 'pred_label'], ascending=[True, False], inplace=True)
    return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)
# Post-processing function (optional). Its input is the output of the main function; you can process the data further here or return a friendlier outputs format. The output of this function is not cached.
def m24_post_run_bigquant_run(outputs):
    return outputs
# Backtest engine: initialization function, executed only once
def m19_initialize_bigquant_run(context):
    # Load the prediction data (passed in via options) into memory as a DataFrame with read_df
    context.ranker_prediction = context.options['data'].read_df()
    # The system sets default commission and slippage; use the function below to change the commission
    context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
    # Number of stocks to buy: the top 20 names in the ranked prediction list
    stock_count = 20
    # Per-stock weights; this log-decay scheme gives higher-ranked stocks more capital
    # (for 5 stocks it would be roughly [0.339, 0.214, 0.170, ...]) -- see the sketch after this function
    context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of total capital that a single stock may occupy
    context.max_cash_per_instrument = 0.2
    context.options['hold_days'] = 5
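# --- Illustrative sketch (not part of the generated strategy): plain-Python equivalent of the
# log-decay weights built in initialize above. T.norm is assumed to normalize a list so it sums to 1.
import math

def log_decay_weights(stock_count):
    raw = [1 / math.log(i + 2) for i in range(stock_count)]  # slowly decaying raw scores
    total = sum(raw)
    return [w / total for w in raw]                          # normalize so the weights sum to 1

# For stock_count=5 this gives roughly [0.339, 0.214, 0.170, 0.146, 0.131];
# for stock_count=20 the top-ranked stock gets about 0.14 of the buy budget.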
# Backtest engine: daily data handler, executed once per trading day
def m19_handle_data_bigquant_run(context, data):
    # Filter the predictions down to today's date
    ranker_prediction = context.ranker_prediction[
        context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]
    # 1. Capital allocation
    # The average holding period is hold_days and new stocks are bought every day, so roughly 1/hold_days of the capital is expected to be deployed each day.
    # In practice there is some buying slippage, so during the first hold_days days an equal amount is used each day; after that, as much of the remaining cash as possible is used (capped here at 1.5x the daily average). A worked budget sketch follows after this function.
    is_staging = context.trading_day_index < context.options['hold_days'] # still in the ramp-up period (first hold_days days)?
    cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
    cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
    positions = {e.symbol: p.amount * p.last_sale_price
                 for e, p in context.perf_tracker.position_tracker.positions.items()}
    # 2. Generate sell orders: selling only starts after the first hold_days days; among held stocks, those ranked lowest by the model's predictions are eliminated first
    if not is_staging and cash_for_sell > 0:
        equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
        instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
                lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
        # print('rank order for sell %s' % instruments)
        for instrument in instruments:
            context.order_target(context.symbol(instrument), 0)
            cash_for_sell -= positions[instrument]
            if cash_for_sell <= 0:
                break
    # 3. Generate buy orders: following the model's predicted ranking, buy the top stock_count stocks
    buy_cash_weights = context.stock_weights
    buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
    max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
    for i, instrument in enumerate(buy_instruments):
        cash = cash_for_buy * buy_cash_weights[i]
        if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Make sure a single position never exceeds the per-stock capital cap
            cash = max_cash_per_instrument - positions.get(instrument, 0)
        if cash > 0:
            context.order_value(context.symbol(instrument), cash)
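# --- Worked example of the budget rule above (illustrative numbers only): with
# portfolio_value = 1,000,000 and hold_days = 5, cash_avg = 200,000; during the first
# 5 days at most 200,000 is spent per day, afterwards up to 1.5 * 200,000 = 300,000,
# always capped by the cash actually available.
def daily_buy_budget(portfolio_value, cash, trading_day_index, hold_days=5):
    cash_avg = portfolio_value / hold_days
    is_staging = trading_day_index < hold_days
    return min(cash, (1 if is_staging else 1.5) * cash_avg)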
# Backtest engine: data preparation, executed only once
def m19_prepare_bigquant_run(context):
    pass
m1 = M.instruments.v2(
    start_date='2010-01-01',
    end_date='2015-12-31',
    market='CN_STOCK_A',
    instrument_list='',
    max_count=0
)
m2 = M.advanced_auto_labeler.v2(
    instruments=m1.data,
    label_expr="""# #号开始的表示注释
# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
# 1. 可用数据字段见 https://bigquant.com/docs/data_history_data.html
#   添加benchmark_前缀,可使用对应的benchmark数据
# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/big_expr.html>`_
# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
shift(close, -5) / shift(open, -1)-1
# 极值处理:用1%和99%分位的值做clip
clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
    start_date='',
    end_date='',
    benchmark='000300.SHA',
    drop_na_label=True,
    cast_label_int=False
)
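# --- Illustrative sketch (not part of the generated pipeline): a pandas equivalent of the label
# expression above, assuming a DataFrame with open/close/high/low columns sorted by date for one
# instrument (the platform's all_quantile is computed over the full dataset instead).
def example_forward_return_label(df, lower=0.01, upper=0.99):
    label = df['close'].shift(-5) / df['open'].shift(-1) - 1          # 5-day return, buying at the next open
    label = label.clip(label.quantile(lower), label.quantile(upper))  # winsorize at the 1%/99% quantiles
    one_bar_limit_up = df['high'].shift(-1) == df['low'].shift(-1)    # next day trades at a single price (limit-up)
    return label.mask(one_bar_limit_up)                               # NaN labels are dropped/ignored downstream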
m13 = M.standardlize.v8(
    input_1=m2.data,
    columns_input='label'
)
m3 = M.input_features.v1(
    features="""close_0/mean(close_0,5)
close_0/mean(close_0,10)
close_0/mean(close_0,20)
close_0/open_0
open_0/mean(close_0,5)
open_0/mean(close_0,10)
open_0/mean(close_0,20)"""
)
m15 = M.general_feature_extractor.v7(
    instruments=m1.data,
    features=m3.data,
    start_date='',
    end_date='',
    before_start_days=0
)
m16 = M.derived_feature_extractor.v3(
    input_data=m15.data,
    features=m3.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=True,
    remove_extra_columns=False
)
m14 = M.standardlize.v8(
    input_1=m16.data,
    input_2=m3.data,
    columns_input='[]'
)
m7 = M.join.v3(
    data1=m13.data,
    data2=m14.data,
    on='date,instrument',
    how='inner',
    sort=False
)
m26 = M.dl_convert_to_bin.v2(
    input_data=m7.data,
    features=m3.data,
    window_size=1,
    feature_clip=5,
    flatten=True,
    window_along_col='instrument'
)
m10 = M.cached.v3(
    input_1=m26.data,
    run=m10_run_bigquant_run,
    post_run=m10_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)
m9 = M.instruments.v2(
    start_date=T.live_run_param('trading_date', '2016-01-01'),
    end_date=T.live_run_param('trading_date', '2019-04-20'),
    market='CN_STOCK_A',
    instrument_list='',
    max_count=0
)
m17 = M.general_feature_extractor.v7(
    instruments=m9.data,
    features=m3.data,
    start_date='',
    end_date='',
    before_start_days=0
)
m18 = M.derived_feature_extractor.v3(
    input_data=m17.data,
    features=m3.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=True,
    remove_extra_columns=False
)
m25 = M.standardlize.v8(
    input_1=m18.data,
    input_2=m3.data,
    columns_input='[]'
)
m27 = M.dl_convert_to_bin.v2(
    input_data=m25.data,
    features=m3.data,
    window_size=1,
    feature_clip=5,
    flatten=True,
    window_along_col='instrument'
)
m6 = M.dl_layer_input.v1(
    shape='7',
    batch_shape='',
    dtype='float32',
    sparse=False,
    name=''
)
m8 = M.dl_layer_dense.v1(
    inputs=m6.data,
    units=256,
    activation='relu',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)
m21 = M.dl_layer_dropout.v1(
    inputs=m8.data,
    rate=0.1,
    noise_shape='',
    name=''
)
m20 = M.dl_layer_dense.v1(
    inputs=m21.data,
    units=128,
    activation='relu',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)
m22 = M.dl_layer_dropout.v1(
    inputs=m20.data,
    rate=0.1,
    noise_shape='',
    name=''
)
m23 = M.dl_layer_dense.v1(
    inputs=m22.data,
    units=1,
    activation='linear',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)
m4 = M.dl_model_init.v1(
    inputs=m6.data,
    outputs=m23.data
)
m5 = M.dl_model_train.v1(
    input_model=m4.data,
    training_data=m10.data_1,
    validation_data=m10.data_2,
    optimizer='自定义',
    user_optimizer=optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
,
    loss='mean_squared_error',
    metrics='mse',
    batch_size=1024,
    epochs=30,
    custom_objects=m5_custom_objects_bigquant_run,
    n_gpus=0,
    verbose='2:每个epoch输出一行记录'
)
m11 = M.dl_model_predict.v1(
    trained_model=m5.data,
    input_data=m27.data,
    batch_size=1024,
    n_gpus=0,
    verbose='2:每个epoch输出一行记录'
)
m24 = M.cached.v3(
    input_1=m11.data,
    input_2=m18.data,
    run=m24_run_bigquant_run,
    post_run=m24_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)
m19 = M.trade.v4(
    instruments=m9.data,
    options_data=m24.data_1,
    start_date='',
    end_date='',
    initialize=m19_initialize_bigquant_run,
    handle_data=m19_handle_data_bigquant_run,
    prepare=m19_prepare_bigquant_run,
    volume_limit=0.025,
    order_price_field_buy='open',
    order_price_field_sell='close',
    capital_base=1000000,
    auto_cancel_non_tradable_orders=True,
    data_frequency='daily',
    price_type='后复权',
    product_type='股票',
    plot_charts=True,
    backtest_only=False,
    benchmark='000300.SHA'
)
[2021-08-05 16:38:08.409651] INFO: moduleinvoker: instruments.v2 开始运行..
[2021-08-05 16:38:08.419493] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.421402] INFO: moduleinvoker: instruments.v2 运行完成[0.011755s].
[2021-08-05 16:38:08.424748] INFO: moduleinvoker: advanced_auto_labeler.v2 开始运行..
[2021-08-05 16:38:08.430945] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.432344] INFO: moduleinvoker: advanced_auto_labeler.v2 运行完成[0.007599s].
[2021-08-05 16:38:08.434570] INFO: moduleinvoker: standardlize.v8 开始运行..
[2021-08-05 16:38:08.440897] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.442304] INFO: moduleinvoker: standardlize.v8 运行完成[0.007732s].
[2021-08-05 16:38:08.444371] INFO: moduleinvoker: input_features.v1 开始运行..
[2021-08-05 16:38:08.451813] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.455219] INFO: moduleinvoker: input_features.v1 运行完成[0.010805s].
[2021-08-05 16:38:08.473581] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2021-08-05 16:38:08.484190] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.486748] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[0.013199s].
[2021-08-05 16:38:08.492237] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2021-08-05 16:38:08.502278] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.504835] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[0.012585s].
[2021-08-05 16:38:08.508302] INFO: moduleinvoker: standardlize.v8 开始运行..
[2021-08-05 16:38:08.523295] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.525700] INFO: moduleinvoker: standardlize.v8 运行完成[0.017397s].
[2021-08-05 16:38:08.530323] INFO: moduleinvoker: join.v3 开始运行..
[2021-08-05 16:38:08.538709] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.540756] INFO: moduleinvoker: join.v3 运行完成[0.010437s].
[2021-08-05 16:38:08.554060] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2021-08-05 16:38:08.568213] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.572114] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.018109s].
[2021-08-05 16:38:08.579056] INFO: moduleinvoker: cached.v3 开始运行..
[2021-08-05 16:38:08.590709] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.592955] INFO: moduleinvoker: cached.v3 运行完成[0.013923s].
[2021-08-05 16:38:08.596727] INFO: moduleinvoker: instruments.v2 开始运行..
[2021-08-05 16:38:08.602280] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.603749] INFO: moduleinvoker: instruments.v2 运行完成[0.007022s].
[2021-08-05 16:38:08.612748] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2021-08-05 16:38:08.618529] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.620359] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[0.007622s].
[2021-08-05 16:38:08.623539] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2021-08-05 16:38:08.631492] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.633272] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[0.009736s].
[2021-08-05 16:38:08.635760] INFO: moduleinvoker: standardlize.v8 开始运行..
[2021-08-05 16:38:08.641902] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.643473] INFO: moduleinvoker: standardlize.v8 运行完成[0.007714s].
[2021-08-05 16:38:08.652650] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2021-08-05 16:38:08.662417] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.665701] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.013065s].
[2021-08-05 16:38:08.672843] INFO: moduleinvoker: dl_layer_input.v1 运行完成[0.002197s].
[2021-08-05 16:38:08.699999] INFO: moduleinvoker: dl_layer_dense.v1 运行完成[0.01755s].
[2021-08-05 16:38:08.708483] INFO: moduleinvoker: dl_layer_dropout.v1 运行完成[0.004783s].
[2021-08-05 16:38:08.722025] INFO: moduleinvoker: dl_layer_dense.v1 运行完成[0.010408s].
[2021-08-05 16:38:08.728630] INFO: moduleinvoker: dl_layer_dropout.v1 运行完成[0.003661s].
[2021-08-05 16:38:08.741283] INFO: moduleinvoker: dl_layer_dense.v1 运行完成[0.009771s].
[2021-08-05 16:38:08.769629] INFO: moduleinvoker: cached.v3 开始运行..
[2021-08-05 16:38:08.776926] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:38:08.778988] INFO: moduleinvoker: cached.v3 运行完成[0.009383s].
[2021-08-05 16:38:08.781094] INFO: moduleinvoker: dl_model_init.v1 运行完成[0.037032s].
[2021-08-05 16:38:08.811194] INFO: moduleinvoker: dl_model_train.v1 开始运行..
[2021-08-05 16:38:09.186162] INFO: dl_model_train: 准备训练,训练样本个数:2350293,迭代次数:30
[2021-08-05 16:46:43.222976] INFO: dl_model_train: 训练结束,耗时:514.03s
[2021-08-05 16:46:43.252901] INFO: moduleinvoker: dl_model_train.v1 运行完成[514.440654s].
[2021-08-05 16:46:43.262266] INFO: moduleinvoker: dl_model_predict.v1 开始运行..
[2021-08-05 16:46:47.796183] INFO: moduleinvoker: dl_model_predict.v1 运行完成[4.533949s].
[2021-08-05 16:46:47.801738] INFO: moduleinvoker: cached.v3 开始运行..
[2021-08-05 16:46:56.695583] INFO: moduleinvoker: cached.v3 运行完成[8.893819s].
[2021-08-05 16:46:56.760426] INFO: moduleinvoker: backtest.v8 开始运行..
[2021-08-05 16:46:56.770752] INFO: backtest: biglearning backtest:V8.5.0
[2021-08-05 16:46:56.774169] INFO: backtest: product_type:stock by specified
[2021-08-05 16:46:58.218568] INFO: moduleinvoker: cached.v2 开始运行..
[2021-08-05 16:46:58.226351] INFO: moduleinvoker: 命中缓存
[2021-08-05 16:46:58.227996] INFO: moduleinvoker: cached.v2 运行完成[0.009446s].
[2021-08-05 16:47:05.939144] INFO: algo: TradingAlgorithm V1.8.3
[2021-08-05 16:47:08.401340] INFO: algo: trading transform...
[2021-08-05 16:48:20.319346] INFO: Performance: Simulated 803 trading days out of 803.
[2021-08-05 16:48:20.320939] INFO: Performance: first open: 2016-01-04 09:30:00+00:00
[2021-08-05 16:48:20.322172] INFO: Performance: last close: 2019-04-19 15:00:00+00:00
[2021-08-05 16:48:39.325924] INFO: moduleinvoker: backtest.v8 运行完成[102.565519s].
[2021-08-05 16:48:39.327563] INFO: moduleinvoker: trade.v4 运行完成[102.626774s].
# Method 1: plot the training curves manually
history = m5.data.read()["history"]
train_loss = history["loss"]
val_loss = history["val_loss"]
train_mse = history["mse"]
val_mse = history["val_mse"]
T.plot(pd.DataFrame({'train': train_loss, 'validation': val_loss}), title='LOSS')
T.plot(pd.DataFrame({'train': train_mse, 'validation': val_mse}), title='MSE')
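An equivalent way to draw the same curves with matplotlib (a sketch, assuming matplotlib is available in the environment; the history keys are the same as above):

import matplotlib.pyplot as plt

history = m5.data.read()["history"]
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
pd.DataFrame({'train': history['loss'], 'validation': history['val_loss']}).plot(ax=axes[0], title='LOSS')
pd.DataFrame({'train': history['mse'], 'validation': history['val_mse']}).plot(ax=axes[1], title='MSE')
plt.show()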