Initial Test of a Deep Learning Model

Strategy Sharing

(brantyz) #1

This is a preliminary attempt; training accuracy is not high yet, and the model has not been tested in a backtest.

In [16]:
# 基础参数配置
class conf:
    start_date = '2010-01-01'
    end_date='2017-01-01'
    # split_date 之前的数据用于训练,之后的数据用作效果评估
    split_date = '2015-01-01'
    batch_size = 128
    # number of classes for the classifier below; the label expression yields integers in [0, 10], so 60 is more than enough
    label_num = 60
    # D.instruments: https://bigquant.com/docs/data_instruments.html
    instruments = D.instruments(start_date, end_date)

    # labeling function for the learning target
    # the expression below is equivalent to min(max(holding-period return * 25, -5), 5) + 5; M.fast_auto_labeler rounds the result to an integer afterwards (see the small example after this cell)
    # note: max/min clip the score to [-5, 5], and +5 shifts it into non-negative integer class labels (StockRanker, for instance, requires non-negative integer scores)
    label_expr = ['return * 25', 'where(label > {0}, {0}, where(label < -{0}, -{0}, label)) + {0}'.format(5)]
    # 持有天数,用于计算label_expr中的return值(收益)
    hold_days = 5

    # 特征 https://bigquant.com/docs/data_features.html,你可以通过表达式构造任何特征
    features = [
        'avg_turn_5',  # 5日平均换手率
        'avg_turn_20',  # 20日平均换手率
        '(volume_0+volume_1+volume_2+volume_3+volume_4)/5',  # 5日平均交易量
        '(volume_0+volume_1+volume_2+volume_3+volume_4+volume_5+volume_6+volume_7+volume_8+volume_9+volume_10+volume_11+volume_12+volume_13+volume_14+volume_15+volume_16+volume_17+volume_18+volume_19)/20',  # 20日平均交易量
        'ta_sma_5_0',  # 5日移动平均
        'ta_sma_10_0',  # 10日移动平均
        'ta_sma_20_0',  # 20日移动平均
    ]
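To make the labeling rule concrete, here is what label_expr describes, written as plain Python (label_of is a hypothetical helper for illustration only; the actual labeling is done by M.fast_auto_labeler below):

# illustration only: the mapping described by label_expr, as plain Python
def label_of(ret, scale=25, clip=5):
    score = max(min(ret * scale, clip), -clip) + clip  # scale the return, clip to [-5, 5], shift to [0, 10]
    return int(round(score))                           # fast_auto_labeler then rounds to an integer class

print(label_of(0.08))   # a +8% holding-period return -> 2 + 5 = 7
print(label_of(-0.30))  # a -30% return is clipped at -5 -> 0
print(label_of(0.00))   # a flat return -> 5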
In [17]:
# 给数据做标注:给每一行数据(样本)打分,一般分数越高表示越好
m1 = M.fast_auto_labeler.v5(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.end_date,
    label_expr=conf.label_expr, hold_days=conf.hold_days,
    benchmark='000300.SHA', sell_at='open', buy_at='open')
# 计算特征数据
m2 = M.general_feature_extractor.v5(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.end_date,
    features=conf.features)
# 数据预处理:缺失数据处理,数据规范化,T.get_stock_ranker_default_transforms为StockRanker模型做数据预处理
m3 = M.transform.v2(
    data=m2.data, transforms=T.get_stock_ranker_default_transforms(),
    drop_null=True, astype='int32', except_columns=['date', 'instrument'],
    clip_lower=0, clip_upper=200000000)
# 合并标注和特征数据
m4 = M.join.v2(data1=m1.data, data2=m3.data, on=['date', 'instrument'], sort=True)

# 训练数据集
m5_training = M.filter.v2(data=m4.data, expr='date < "%s"' % conf.split_date)
# 评估数据集
m5_evaluation = M.filter.v2(data=m4.data, expr='"%s" <= date' % conf.split_date)
# # StockRanker机器学习训练
# m6 = M.stock_ranker_train.v2(training_ds=m5_training.data, features=conf.features)
# # 对评估集做预测
# m7 = M.stock_ranker_predict.v2(model_id=m6.model_id, data=m5_evaluation.data)


# ## 量化回测 https://bigquant.com/docs/strategy_backtest.html
# # 回测引擎:初始化函数,只执行一次
# def initialize(context):
#     # 系统已经设置了默认的交易手续费和滑点,要修改手续费可使用如下函数
#     context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
#     # 预测数据,通过options传入进来,使用 read_df 函数,加载到内存 (DataFrame)
#     context.ranker_prediction = context.options['ranker_prediction'].read_df()
#     # 设置买入的股票数量,这里买入预测股票列表排名靠前的5只
#     stock_count = 5
#     # 每只的股票的权重,如下的权重分配会使得靠前的股票分配多一点的资金,[0.339160, 0.213986, 0.169580, ..]
#     context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
#     # 设置每只股票占用的最大资金比例
#     context.max_cash_per_instrument = 0.2

# # 回测引擎:每日数据处理函数,每天执行一次
# def handle_data(context, data):
#     # 按日期过滤得到今日的预测数据
#     ranker_prediction = context.ranker_prediction[context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]

#     # 1. 资金分配
#     # 平均持仓时间是hold_days,每日都将买入股票,每日预期使用 1/hold_days 的资金
#     # 实际操作中,会存在一定的买入误差,所以在前hold_days天,等量使用资金;之后,尽量使用剩余资金(这里设置最多用等量的1.5倍)
#     is_staging = context.trading_day_index < context.options['hold_days'] # 是否在建仓期间(前 hold_days 天)
#     cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
#     cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
#     cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
#     positions = {e.symbol: p.amount * p.last_sale_price for e, p in context.perf_tracker.position_tracker.positions.items()}
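#     # Illustrative numbers (assumed, not taken from a real run): with portfolio_value = 1,000,000 and
#     # hold_days = 5, cash_avg = 200,000; during the staging period cash_for_buy is capped at 200,000,
#     # afterwards at 1.5 * 200,000 = 300,000, and it never exceeds the currently available cash.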

#     # 2. 生成卖出订单:hold_days天之后才开始卖出;对持仓的股票,按StockRanker预测的排序末位淘汰
#     if not is_staging and cash_for_sell > 0:
#         equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
#         instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
#                 lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
#         # print('rank order for sell %s' % instruments)
#         for instrument in instruments:
#             context.order_target(context.symbol(instrument), 0)
#             cash_for_sell -= positions[instrument]
#             if cash_for_sell <= 0:
#                 break

#     # 3. 生成买入订单:按StockRanker预测的排序,买入前面的stock_count只股票
#     buy_cash_weights = context.stock_weights
#     buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
#     max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
#     for i, instrument in enumerate(buy_instruments):
#         cash = cash_for_buy * buy_cash_weights[i]
#         if cash > max_cash_per_instrument - positions.get(instrument, 0):
#             # 确保股票持仓量不会超过每次股票最大的占用资金量
#             cash = max_cash_per_instrument - positions.get(instrument, 0)
#         if cash > 0:
#             context.order_value(context.symbol(instrument), cash)

# # 调用回测引擎
# m8 = M.backtest.v5(
#     instruments=m7.instruments,
#     start_date=m7.start_date,
#     end_date=m7.end_date,
#     initialize=initialize,
#     handle_data=handle_data,
#     order_price_field_buy='open',       # 表示 开盘 时买入
#     order_price_field_sell='close',     # 表示 收盘 前卖出
#     capital_base=1000000,               # 初始资金
#     benchmark='000300.SHA',             # 比较基准,不影响回测结果
#     # 通过 options 参数传递预测数据和参数给回测引擎
#     options={'ranker_prediction': m7.predictions, 'hold_days': conf.hold_days}
# )
[2017-04-29 16:21:31.665501] INFO: bigquant: fast_auto_labeler.v5 start ..
[2017-04-29 16:21:38.466157] INFO: fast_auto_labeler: load history data: 3854057 rows
[2017-04-29 16:21:39.426047] INFO: fast_auto_labeler: start labeling
[2017-04-29 16:23:15.827334] INFO: bigquant: fast_auto_labeler.v5 end [104.161736s].
[2017-04-29 16:23:15.835289] INFO: bigquant: general_feature_extractor.v5 start ..
[2017-04-29 16:23:15.837804] INFO: bigquant: hit cache
[2017-04-29 16:23:15.839132] INFO: bigquant: general_feature_extractor.v5 end [0.003843s].
[2017-04-29 16:23:15.844771] INFO: bigquant: transform.v2 start ..
[2017-04-29 16:23:15.847136] INFO: bigquant: hit cache
[2017-04-29 16:23:15.848457] INFO: bigquant: transform.v2 end [0.003695s].
[2017-04-29 16:23:15.851847] INFO: bigquant: join.v2 start ..
[2017-04-29 16:23:25.902988] INFO: filter: /y_2010, rows=424080/424613, timetaken=8.120956s
[2017-04-29 16:23:34.062625] INFO: filter: /y_2011, rows=505162/505694, timetaken=8.089353s
[2017-04-29 16:23:42.664114] INFO: filter: /y_2012, rows=561285/562381, timetaken=8.517399s
[2017-04-29 16:23:51.995797] INFO: filter: /y_2013, rows=563104/564139, timetaken=9.223825s
[2017-04-29 16:24:01.951167] INFO: filter: /y_2014, rows=566142/567630, timetaken=9.858321s
[2017-04-29 16:24:12.142760] INFO: filter: /y_2015, rows=558321/565355, timetaken=10.085794s
[2017-04-29 16:24:21.752922] INFO: filter: /y_2016, rows=618023/637125, timetaken=9.586498s
[2017-04-29 16:24:22.248995] INFO: filter: total result rows: 3796117
[2017-04-29 16:24:22.253411] INFO: bigquant: join.v2 end [66.401552s].
[2017-04-29 16:24:22.257963] INFO: bigquant: filter.v2 start ..
[2017-04-29 16:24:22.262115] INFO: filter: filter with expr date < "2015-01-01"
[2017-04-29 16:24:23.760467] INFO: filter: filter /y_2010, 424080/424080
[2017-04-29 16:24:25.214307] INFO: filter: filter /y_2011, 505162/505162
[2017-04-29 16:24:26.888647] INFO: filter: filter /y_2012, 561285/561285
[2017-04-29 16:24:28.945789] INFO: filter: filter /y_2013, 563104/563104
[2017-04-29 16:24:31.174644] INFO: filter: filter /y_2014, 566142/566142
[2017-04-29 16:24:31.571828] INFO: filter: filter /y_2015, 0/558321
[2017-04-29 16:24:32.106390] INFO: filter: filter /y_2016, 0/618023
[2017-04-29 16:24:32.230664] INFO: bigquant: filter.v2 end [9.972655s].
[2017-04-29 16:24:32.236063] INFO: bigquant: filter.v2 start ..
[2017-04-29 16:24:32.241525] INFO: filter: filter with expr "2015-01-01" <= date
[2017-04-29 16:24:32.769707] INFO: filter: filter /y_2010, 0/424080
[2017-04-29 16:24:33.022929] INFO: filter: filter /y_2011, 0/505162
[2017-04-29 16:24:33.315973] INFO: filter: filter /y_2012, 0/561285
[2017-04-29 16:24:33.616330] INFO: filter: filter /y_2013, 0/563104
[2017-04-29 16:24:33.883399] INFO: filter: filter /y_2014, 0/566142
[2017-04-29 16:24:35.558923] INFO: filter: filter /y_2015, 558321/558321
[2017-04-29 16:24:37.994871] INFO: filter: filter /y_2016, 618023/618023
[2017-04-29 16:24:38.270827] INFO: bigquant: filter.v2 end [6.034699s].
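As a quick sanity check on the time-based split above, the two filtered datasets can be inspected with the same read_df() accessor used in the cells below (a minimal sketch; the expected boundary follows from conf.split_date):

# sanity check on the training / evaluation split (sketch only)
train_df = m5_training.data.read_df()
eval_df = m5_evaluation.data.read_df()
print(train_df.shape, train_df['date'].max())   # latest training date, should fall before conf.split_date
print(eval_df.shape, eval_df['date'].min())     # earliest evaluation date, should be on/after conf.split_date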
In [18]:
# TensorFlow 1.x style code: tf.contrib.slim and tf.placeholder are not available in TensorFlow 2.x
import tensorflow as tf
slim = tf.contrib.slim
import numpy as np
# column names used as the label and as the network's input features
label_index = [m1.data.read_df().columns[-2]]
train_index = m3.data.read_df().columns[2:].tolist()
# a simple 3-layer fully connected network: 128 -> 64 -> label_num logits, with L2 weight regularization
def fc_model(inputs):
    with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(5e-5)):
        net = slim.fully_connected(inputs, 128, activation_fn=tf.nn.relu, normalizer_fn=None)
        net = slim.fully_connected(net, 64, activation_fn=tf.nn.relu, normalizer_fn=None)
        net = slim.fully_connected(net, conf.label_num, activation_fn=None, normalizer_fn=None)
        return net

# placeholders for one batch of features and integer class labels
xx_inputs = tf.placeholder(tf.float32, shape=[conf.batch_size, len(train_index)])
yy_inputs = tf.placeholder(tf.int32, shape=[conf.batch_size])

logits_op=fc_model(xx_inputs)
one_hot_label=tf.one_hot(yy_inputs,conf.label_num)
# loss_op=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_op,labels=yy_inputs)
loss_op=slim.losses.softmax_cross_entropy(onehot_labels=one_hot_label,logits=logits_op,label_smoothing=0.1)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# total loss = cross-entropy + the L2 regularization terms collected from the slim layers
loss_op = tf.add_n([loss_op] + regularization_losses, name='total_loss')
top1_error = 1 - tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits_op, yy_inputs, k=1)))
# train_op= tf.train.MomentumOptimizer(learning_rate=0.001,momentum=0.5).minimize(loss_op)
train_op= tf.train.AdamOptimizer().minimize(loss_op)
prediction_op=tf.nn.softmax(logits_op)

sess=tf.Session()
sess.run(tf.global_variables_initializer())
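A note on label_smoothing=0.1 above: the one-hot targets are softened before the cross-entropy is computed, i.e. each target becomes onehot * (1 - smoothing) + smoothing / num_classes. A minimal NumPy sketch of that transformation (illustration only, not part of the training graph):

import numpy as np
num_classes, smoothing = 60, 0.1                               # conf.label_num classes, smoothing as above
onehot = np.eye(num_classes)[7]                                # a sample whose true class is 7
smoothed = onehot * (1 - smoothing) + smoothing / num_classes
print(smoothed[7], smoothed[0])                                # ~0.9017 for the true class, ~0.0017 elsewhere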
In [19]:
train_data = m5_training.data.read_df()
# simple per-column max scaling of the features (an evaluation set should be scaled with these same training-set maxima)
for col_name in train_index:
    train_data[col_name] /= train_data[col_name].max()
train_data = np.asarray(train_data[(train_index + label_index)])
In [20]:
# train for 30 epochs; reshuffle every epoch and report the mean loss / top-1 error every 2000 batches
for epoch_id in range(30):
    np.random.shuffle(train_data)
    loss_list=[];error_list=[]
    for batch_idx in range(int(len(train_data)/conf.batch_size)):
        xx_data=train_data[batch_idx*conf.batch_size:batch_idx*conf.batch_size+conf.batch_size,:-1]
        yy_data=train_data[batch_idx*conf.batch_size:batch_idx*conf.batch_size+conf.batch_size,-1]
        loss,error,_=sess.run([loss_op,top1_error,train_op],feed_dict={xx_inputs:xx_data,yy_inputs:yy_data})
        loss_list.append(loss)
        error_list.append(error)
        if batch_idx%2000==0:
            print('epoch :%s batch_idx:%s loss:%s top1_error:%s'%(epoch_id,batch_idx,np.mean(loss_list),np.mean(error_list)))
            loss_list=[];error_list=[]
epoch :0 batch_idx:0 loss:2.47574 top1_error:0.664062
epoch :0 batch_idx:2000 loss:2.27436 top1_error:0.709746
epoch :0 batch_idx:4000 loss:2.27077 top1_error:0.705648
epoch :0 batch_idx:6000 loss:2.27351 top1_error:0.707773
epoch :0 batch_idx:8000 loss:2.27153 top1_error:0.707047
epoch :0 batch_idx:10000 loss:2.26923 top1_error:0.706539
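The evaluation set (m5_evaluation) prepared earlier is not used above. As a rough sketch of how hold-out top-1 error could be measured with the same graph: feature_max below is a hypothetical dict of per-column training-set maxima (it would have to be saved before the training DataFrame is normalized and converted to an array), and incomplete final batches are simply dropped.

# sketch only: hold-out top-1 error of the trained network on the evaluation set
# feature_max is a hypothetical dict {column: training-set max}, saved before the normalization above
eval_df = m5_evaluation.data.read_df()
for col_name in train_index:
    eval_df[col_name] /= feature_max[col_name]
eval_data = np.asarray(eval_df[(train_index + label_index)])

errors = []
for batch_idx in range(int(len(eval_data) / conf.batch_size)):
    xx_data = eval_data[batch_idx * conf.batch_size:(batch_idx + 1) * conf.batch_size, :-1]
    yy_data = eval_data[batch_idx * conf.batch_size:(batch_idx + 1) * conf.batch_size, -1]
    errors.append(sess.run(top1_error, feed_dict={xx_inputs: xx_data, yy_inputs: yy_data}))
print('evaluation top1_error: %s' % np.mean(errors))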
In [ ]:
 

(路飞) #2

It's almost all code with very little written explanation; readers without a strong math background will find it hard to follow. Could the OP add some explanatory text?