Copy link
Clone strategy
In [10]:
# This code was generated automatically by the visual strategy editor on 2022-04-11 09:45.
# This cell is editable only in visual mode. You can also copy the code into a new code cell or strategy and modify it there.


# User-defined custom layers must be registered in this dict, e.g.
# {
#   "MyLayer": MyLayer
# }
# Left empty because the network uses only built-in layer modules.
m6_custom_objects_bigquant_run = {}

# Python entry function; input_1/2/3 are the three input ports, data_1/2/3 the three output ports
def m2_run_bigquant_run(input_1, input_2, input_3):
    """Convert raw model predictions into a date-indexed pandas Series.

    input_1: pickled prediction array (flattenable to length n — presumably
             shape (n,) or (n, 1); the reshape below requires it).
    input_2: binned test data — unused; kept only for port compatibility.
    input_3: DataFrame providing the 'date' column used as the index.

    Returns Outputs(data_1=<DataSource wrapping the prediction Series>).
    """
    pred_label = input_1.read_pickle()
    # Flatten predictions to a 1-D array of length n.
    pred_result = pred_label.reshape(pred_label.shape[0])
    # Align predictions with the last len(pred_result) dates of the frame.
    dt = input_3.read_df()['date'][-len(pred_result):]
    pred_df = pd.Series(pred_result, index=dt)
    ds = DataSource.write_df(pred_df)

    return Outputs(data_1=ds)

# Post-processing hook (optional). It receives the main function's outputs and
# may reshape them into a friendlier format. Its result is never cached.
def m2_post_run_bigquant_run(outputs):
    """Identity pass-through: return the outputs unchanged."""
    return outputs

# Backtest engine: initialization hook, executed exactly once
def m1_initialize_bigquant_run(context):
    """Load the upstream prediction series and set per-order commissions."""
    # Load the prediction data passed in via the trade module's options.
    context.prediction = context.options['data'].read_df()

    # The platform sets default commission and slippage; override the
    # commission schedule here.
    fee_model = PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5)
    context.set_commission(fee_model)
# Backtest engine: daily data handler, executed once per trading day
def m1_handle_data_bigquant_run(context, data):
    """Trade the single instrument based on today's predicted probability.

    Goes all-in when the prediction exceeds 0.5 and no position is held;
    liquidates when it drops below 0.5 while holding. Days without a
    prediction entry (e.g. outside the prediction window) are skipped.
    """
    # Filter the predictions by today's date; silently skip missing dates.
    try:
        prediction = context.prediction[data.current_dt.strftime('%Y-%m-%d')]
    except KeyError:
        return

    instrument = context.instruments[0]
    sid = context.symbol(instrument)
    cur_position = context.portfolio.positions[sid].amount

    # Trading logic (reuse the already-resolved `sid` instead of calling
    # context.symbol() again).
    if prediction > 0.5 and cur_position == 0:
        context.order_target_percent(sid, 1)
        print(data.current_dt, '买入!')

    elif prediction < 0.5 and cur_position > 0:
        context.order_target_percent(sid, 0)
        print(data.current_dt, '卖出!')
    
# Backtest engine: data-preparation hook, executed exactly once before the run
def m1_prepare_bigquant_run(context):
    """No preparation needed for this strategy."""

# Backtest engine: called once before each trading session (daily pre-open).
def m1_before_trading_start_bigquant_run(context, data):
    """No pre-open work needed for this strategy."""


# ---- Network definition: CNN + LSTM binary classifier ----
# NOTE(review): this input shape must equal (window_size, n_features) of the
# binned data. The traceback at the bottom of this cell shows the data actually
# arrived as (None, 50, 6) while the model declares (None, 50, 5) — reconcile
# the feature count in m8 with this shape before rerunning.
m3 = M.dl_layer_input.v1(
    shape='50,5',
    batch_shape='',
    dtype='float32',
    sparse=False,
    name=''
)

# Add a trailing channel axis so the 2-D convolution can consume the window:
# (50, 5) -> (50, 5, 1).
m13 = M.dl_layer_reshape.v1(
    inputs=m3.data,
    target_shape='50,5,1',
    name=''
)

# 2-D convolution over the (time, feature) plane: 32 filters of size 3x5 with
# 'valid' padding, which (per the configured sizes) collapses the feature axis
# (5-5+1 = 1) and shortens the time axis to 50-3+1 = 48.
m14 = M.dl_layer_conv2d.v1(
    inputs=m13.data,
    filters=32,
    kernel_size='3,5',
    strides='1,1',
    padding='valid',
    data_format='channels_last',
    dilation_rate='1,1',
    activation='relu',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)

# Drop the singleton axis so the LSTM sees a (steps, channels) sequence:
# conv output -> (48, 32).
m15 = M.dl_layer_reshape.v1(
    inputs=m14.data,
    target_shape='48,32',
    name=''
)

# LSTM over the 48-step sequence; return_sequences=False keeps only the final
# hidden state (a 32-dim vector).
m4 = M.dl_layer_lstm.v1(
    inputs=m15.data,
    units=32,
    activation='tanh',
    recurrent_activation='hard_sigmoid',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    recurrent_initializer='Orthogonal',
    bias_initializer='Ones',
    unit_forget_bias=True,
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    recurrent_regularizer='None',
    recurrent_regularizer_l1=0,
    recurrent_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    recurrent_constraint='None',
    bias_constraint='None',
    dropout=0,
    recurrent_dropout=0,
    return_sequences=False,
    implementation='2',
    name=''
)

# Regularization: drop 40% of the LSTM output units during training.
m11 = M.dl_layer_dropout.v1(
    inputs=m4.data,
    rate=0.4,
    noise_shape='',
    name=''
)

# Fully connected hidden layer (32 units, tanh).
m10 = M.dl_layer_dense.v1(
    inputs=m11.data,
    units=32,
    activation='tanh',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)

# Heavier dropout (80%) before the output layer — NOTE(review): 0.8 is
# unusually aggressive; confirm it is intentional.
m12 = M.dl_layer_dropout.v1(
    inputs=m10.data,
    rate=0.8,
    noise_shape='',
    name=''
)

# Output layer: a single sigmoid unit — the predicted probability of label 1.
m9 = M.dl_layer_dense.v1(
    inputs=m12.data,
    units=1,
    activation='sigmoid',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='Zeros',
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    bias_constraint='None',
    name=''
)

# Assemble the model from the input and output layer handles.
m5 = M.dl_model_init.v1(
    inputs=m3.data,
    outputs=m9.data
)
# Candidate feature expressions kept for reference (currently disabled):
# rank_avg_mf_net_amount_0/rank_avg_mf_net_amount_5
# rank_avg_mf_net_amount_5/rank_avg_mf_net_amount_10
# return_5/return_20
# rank_amount_5
# avg_turn_10
# pe_ttm_0>0
# pb_lf_0
# sum(mf_net_pct_main_0>0.12,30)>11
# fs_roa_ttm_0>5
# close_0>ts_max(close_0,56)
# ta_sma_10_0/ta_sma_30_0
# ta_sar_0
# swing_volatility_10_0/swing_volatility_60_0 
# ta_cci_14_0 
# rank_return_3 
# mf_net_amount_0>mf_net_amount_1 
# mf_net_amount_xl_0>mean(mf_net_amount_xl_0, 30)
# rank(mean(mf_net_amount_l_0,5))/rank(mean(mf_net_amount_l_0,10))
# correlation(sqrt(volume_0),return_0,5)
# correlation(log(volume_0),abs(return_0-1),5)
# (close_0-close_30)/close_30>1.25
# (close_0-close_5)/close_5>1.16
# ta_bbands_middleband_28_0

# Active features: five daily OHLCV change rates, each scaled by 10.
# NOTE(review): the input layer (m3) expects 5 features per time step, but the
# training traceback reports 6 columns — verify how the binning step counts
# columns for this feature list (note the indented first line of the string).
m8 = M.input_features.v1(
    features="""
    (close_0/close_1-1)*10
(high_0/high_1-1)*10
(low_0/low_1-1)*10
(open_0/open_1-1)*10
(volume_0/volume_1-1)*10

"""
)

# Training universe: the single instrument 600009.SHA over 2017-2020.
m24 = M.instruments.v2(
    start_date='2017-01-02',
    end_date='2020-12-31',
    market='CN_STOCK_A',
    instrument_list='600009.SHA',
    max_count=0
)

# Binary label: 1 when the 10-day-forward close return is positive, else 0;
# one-price (limit-up) days are set to NaN and dropped via drop_na_label.
# NOTE(review): the Chinese comment inside the expression mentions a 5-day
# horizon but the expression uses shift(close, -10) — confirm which is intended.
m21 = M.advanced_auto_labeler.v2(
    instruments=m24.data,
    label_expr="""# #号开始的表示注释
# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
# 1. 可用数据字段见 https://bigquant.com/docs/develop/datasource/deprecated/history_data.html
#   添加benchmark_前缀,可使用对应的benchmark数据
# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/develop/bigexpr/usage.html>`_

# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
where(shift(close, -10) / close -1>0,1,0)

# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
    start_date='',
    end_date='',
    benchmark='000300.SHA',
    drop_na_label=True,
    cast_label_int=True,
    user_functions={}
)

# Extract base market data for the training range; 90 warm-up days cover the
# lookback needed by the rolling window below.
m22 = M.general_feature_extractor.v7(
    instruments=m24.data,
    features=m8.data,
    start_date='',
    end_date='',
    before_start_days=90
)

# Evaluate the derived feature expressions declared in m8.
m23 = M.derived_feature_extractor.v3(
    input_data=m22.data,
    features=m8.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=False,
    remove_extra_columns=False,
    user_functions={}
)

# Join labels (m21) with features (m23) on date.
m17 = M.join.v3(
    data1=m21.data,
    data2=m23.data,
    on='date',
    how='inner',
    sort=True
)

# Drop rows containing NaN before binning.
m18 = M.dropnan.v1(
    input_data=m17.data
)

# Convert to training samples: rolling windows of 50 time steps, with
# feature_clip=5 (clipping level — see platform docs for exact semantics).
m25 = M.dl_convert_to_bin.v2(
    input_data=m18.data,
    features=m8.data,
    window_size=50,
    feature_clip=5,
    flatten=False,
    window_along_col=''
)

# Train the model (Adam optimizer, binary cross-entropy, 10 epochs).
# NOTE(review): this call raised the ValueError shown in the output below —
# the binned data carries 6 features per step while the input layer (m3)
# declares 5. Fix the feature/shape mismatch before rerunning.
m6 = M.dl_model_train.v1(
    input_model=m5.data,
    training_data=m25.data,
    optimizer='Adam',
    loss='binary_crossentropy',
    metrics='accuracy',
    batch_size=2048,
    epochs=10,
    custom_objects=m6_custom_objects_bigquant_run,
    n_gpus=1,
    verbose='1:输出进度条记录'
)

# Prediction/backtest universe: same instrument, 2021-02-11 to 2022-04-01.
# In live trading, trading_date overrides both dates.
m28 = M.instruments.v2(
    start_date=T.live_run_param('trading_date', '2021-02-11'),
    end_date=T.live_run_param('trading_date', '2022-04-01'),
    market='CN_STOCK_A',
    instrument_list='600009.SHA',
    max_count=0
)

# Same feature pipeline as training, applied to the prediction range.
m16 = M.general_feature_extractor.v7(
    instruments=m28.data,
    features=m8.data,
    start_date='',
    end_date='',
    before_start_days=90
)

m26 = M.derived_feature_extractor.v3(
    input_data=m16.data,
    features=m8.data,
    date_col='date',
    instrument_col='instrument',
    drop_na=False,
    remove_extra_columns=False,
    user_functions={}
)

m20 = M.dropnan.v1(
    input_data=m26.data
)

# Window/clip settings must match the training-side m25 exactly.
m27 = M.dl_convert_to_bin.v2(
    input_data=m20.data,
    features=m8.data,
    window_size=50,
    feature_clip=5,
    flatten=False,
    window_along_col=''
)

# Run the trained model on the prediction windows (CPU, large batch).
m7 = M.dl_model_predict.v1(
    trained_model=m6.data,
    input_data=m27.data,
    batch_size=10240,
    n_gpus=0,
    verbose='2:每个epoch输出一行记录'
)

# Glue step: m2_run_bigquant_run converts raw predictions (input_1) plus the
# source frame's dates (input_3) into a date-indexed Series for the trader.
m2 = M.cached.v3(
    input_1=m7.data,
    input_2=m27.data,
    input_3=m20.data,
    run=m2_run_bigquant_run,
    post_run=m2_post_run_bigquant_run,
    input_ports='',
    params='{}',
    output_ports=''
)

# Backtest: daily bars, buy at open / sell at close, 1M initial capital,
# benchmarked against 000300.HIX.
m1 = M.trade.v4(
    instruments=m28.data,
    options_data=m2.data_1,
    start_date='',
    end_date='',
    initialize=m1_initialize_bigquant_run,
    handle_data=m1_handle_data_bigquant_run,
    prepare=m1_prepare_bigquant_run,
    before_trading_start=m1_before_trading_start_bigquant_run,
    volume_limit=0.025,
    order_price_field_buy='open',
    order_price_field_sell='close',
    capital_base=1000000,
    auto_cancel_non_tradable_orders=True,
    data_frequency='daily',
    price_type='真实价格',
    product_type='股票',
    plot_charts=True,
    backtest_only=False,
    benchmark='000300.HIX'
)
Epoch 1/10
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-10-84838f410bb8> in <module>
    305 )
    306 
--> 307 m6 = M.dl_model_train.v1(
    308     input_model=m5.data,
    309     training_data=m25.data,

ValueError: in user code:

    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:805 train_function  *
        return step_function(self, iterator)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:795 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
        return fn(*args, **kwargs)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:788 run_step  **
        outputs = model.train_step(data)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:754 train_step
        y_pred = self(x, training=True)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:998 __call__
        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
    /usr/local/python3/lib/python3.8/site-packages/tensorflow/python/keras/engine/input_spec.py:271 assert_input_compatibility
        raise ValueError('Input ' + str(input_index) +

    ValueError: Input 0 is incompatible with layer BigQuantDL: expected shape=(None, 50, 5), found shape=(None, 50, 6)