Clone Strategy
In [ ]:
# Basic configuration
class conf:
    start_date = '2010-01-01'
    end_date = '2017-01-01'
    # Data before split_date is used for training; data from split_date onward is used for evaluation
    split_date = '2015-01-01'
    # D.instruments: https://bigquant.com/docs/data_instruments.html
    instruments = D.instruments(start_date, end_date)

    # Labeling function for the machine-learning target
    # The expression below is equivalent to min(max(holding-period return * 100, -20), 20) + 20
    # (M.fast_auto_labeler later rounds the result to an integer)
    # Note: min/max clamp the label score to the range [-20, 20]; +20 then shifts it to be
    # non-negative (StockRanker requires label scores to be non-negative integers)
    label_expr = ['return * 100', 'where(label > {0}, {0}, where(label < -{0}, -{0}, label)) + {0}'.format(20)]
    # Holding period in days, used to compute the return value in label_expr
    hold_days = 5

    # Features https://bigquant.com/docs/data_features.html; any feature can be constructed via expressions
    features = [
        'list_days_0',  # number of days since the stock was listed
    ]
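
# Quick sanity check of the labeling arithmetic above (a plain-Python sketch with
# hypothetical returns; fast_auto_labeler evaluates label_expr itself):
assert min(max(0.03 * 100, -20), 20) + 20 == 23   # a 3% gain maps to score 23
assert min(max(-0.50 * 100, -20), 20) + 20 == 0   # a -50% loss is clipped to score 0
assert min(max(0.80 * 100, -20), 20) + 20 == 40   # an 80% gain is clipped to score 40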

# Label the data: score every row (sample); a higher score generally means better
m1 = M.fast_auto_labeler.v5(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.end_date,
    label_expr=conf.label_expr, hold_days=conf.hold_days,
    benchmark='000300.SHA', sell_at='open', buy_at='open')
# Compute feature data
m2 = M.general_feature_extractor.v5(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.end_date,
    features=conf.features)
# Data preprocessing: handle missing values and normalize; T.get_stock_ranker_default_transforms
# returns the default preprocessing for the StockRanker model
m3 = M.transform.v2(
    data=m2.data, transforms=T.get_stock_ranker_default_transforms(),
    drop_null=True, astype='int32', except_columns=['date', 'instrument'],
    clip_lower=0, clip_upper=200000000)
# Join the label data with the feature data
m4 = M.join.v2(data1=m1.data, data2=m3.data, on=['date', 'instrument'], sort=True)

# Training dataset
m5_training = M.filter.v2(data=m4.data, expr='date < "%s"' % conf.split_date)
# Evaluation dataset
m5_evaluation = M.filter.v2(data=m4.data, expr='"%s" <= date' % conf.split_date)
# Train the StockRanker model
m6 = M.stock_ranker_train.v2(training_ds=m5_training.data, features=conf.features)
# Predict on the evaluation set
m7 = M.stock_ranker_predict.v2(model_id=m6.model_id, data=m5_evaluation.data)


## Backtest https://bigquant.com/docs/strategy_backtest.html
import math  # math.log is used below; imported explicitly in case it is not preloaded

# Backtest engine: initialization function, executed only once
def initialize(context):
    # The system presets default commission and slippage; change the commission like this
    context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
    # Prediction data, passed in via options; read_df loads it into memory as a DataFrame
    context.ranker_prediction = context.options['ranker_prediction'].read_df()
    # Number of stocks to buy: the top 5 of the predicted ranking
    stock_count = 5
    # Per-stock weights; this scheme gives higher-ranked stocks a bit more cash,
    # e.g. [0.339160, 0.213986, 0.169580, ..]
    context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of the portfolio a single stock may occupy
    context.max_cash_per_instrument = 0.2
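
# For intuition: assuming T.norm simply normalizes the list to sum to 1 (which
# matches the example values in the comment above), the weights are equivalent
# to this plain-Python computation, giving ~[0.3392, 0.2140, 0.1696, 0.1461, 0.1312]:
_raw = [1 / math.log(i + 2) for i in range(5)]
_weights = [w / sum(_raw) for w in _raw]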

# Backtest engine: daily data-processing function, executed once per trading day
def handle_data(context, data):
    # Filter by date to get today's prediction data
    ranker_prediction = context.ranker_prediction[context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]

    # 1. Cash allocation
    # The average holding period is hold_days and we buy every day, so we expect to
    # deploy 1/hold_days of the portfolio per day. In practice buys do not fill
    # exactly, so for the first hold_days days we spend an equal slice of cash;
    # after that we try to deploy the remaining cash (capped here at 1.5x the slice)
    is_staging = context.trading_day_index < context.options['hold_days']  # still in the position-building stage (first hold_days days)?
    cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
    cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
    positions = {e.symbol: p.amount * p.last_sale_price
                 for e, p in context.perf_tracker.position_tracker.positions.items()}
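
    # Worked example (hypothetical numbers): with portfolio_value = 1,000,000 and
    # hold_days = 5, cash_avg = 200,000. While staging, cash_for_buy is capped at
    # 200,000 per day; afterwards up to 300,000 of idle cash may be deployed, and
    # cash_for_sell covers whatever part of the daily slice idle cash cannot.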

    # 2. Generate sell orders: selling only starts after the first hold_days days;
    # held stocks are eliminated from the bottom of StockRanker's predicted ranking
    if not is_staging and cash_for_sell > 0:
        equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
        instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
                lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
        # print('rank order for sell %s' % instruments)
        for instrument in instruments:
            context.order_target(context.symbol(instrument), 0)
            cash_for_sell -= positions[instrument]
            if cash_for_sell <= 0:
                break

    # 3. Generate buy orders: buy the top stock_count stocks in StockRanker's predicted ranking
    buy_cash_weights = context.stock_weights
    buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
    max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
    for i, instrument in enumerate(buy_instruments):
        cash = cash_for_buy * buy_cash_weights[i]
        if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Ensure the position in this stock never exceeds the per-stock cash cap
            cash = max_cash_per_instrument - positions.get(instrument, 0)
        if cash > 0:
            context.order_value(context.symbol(instrument), cash)
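
    # Example of the cap above (hypothetical numbers): with portfolio_value =
    # 1,000,000 and max_cash_per_instrument = 0.2, a single stock may tie up at
    # most 200,000; if 150,000 of it is already held, at most 50,000 more is bought.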

# Run the backtest engine
m8 = M.backtest.v5(
    instruments=m7.instruments,
    start_date=m7.start_date,
    end_date=m7.end_date,
    initialize=initialize,
    handle_data=handle_data,
    order_price_field_buy='open',       # buy at the open price
    order_price_field_sell='close',     # sell at the close price
    capital_base=1000000,               # initial capital
    benchmark='000300.SHA',             # benchmark for comparison; does not affect backtest results
    # pass the prediction data and parameters to the backtest engine via options
    options={'ranker_prediction': m7.predictions, 'hold_days': conf.hold_days}
)
[2017-04-07 16:30:50.493011] INFO: bigquant: fast_auto_labeler.v5 start ..
[2017-04-07 16:30:50.495610] INFO: bigquant: hit cache
[2017-04-07 16:30:50.502125] INFO: bigquant: fast_auto_labeler.v5 end [0.009149s].
[2017-04-07 16:30:50.506931] INFO: bigquant: general_feature_extractor.v5 start ..
[2017-04-07 16:30:54.370939] INFO: general_feature_extractor: year 2010, featurerows=431567
[2017-04-07 16:30:57.900254] INFO: general_feature_extractor: year 2011, featurerows=511455
[2017-04-07 16:31:02.691453] INFO: general_feature_extractor: year 2012, featurerows=565675
[2017-04-07 16:31:07.789944] INFO: general_feature_extractor: year 2013, featurerows=564168
[2017-04-07 16:31:11.879902] INFO: general_feature_extractor: year 2014, featurerows=569948
[2017-04-07 16:31:16.612250] INFO: general_feature_extractor: year 2015, featurerows=569698
[2017-04-07 16:31:22.663122] INFO: general_feature_extractor: year 2016, featurerows=641546
[2017-04-07 16:31:23.777968] INFO: general_feature_extractor: year 2017, featurerows=0
[2017-04-07 16:31:23.804652] INFO: general_feature_extractor: total feature rows: 3854057
[2017-04-07 16:31:23.808091] INFO: bigquant: general_feature_extractor.v5 end [33.301142s].
[2017-04-07 16:31:23.814650] INFO: bigquant: transform.v2 start ..
[2017-04-07 16:31:24.338684] INFO: transform: transformed /y_2010, 431567/431567
[2017-04-07 16:31:24.774809] INFO: transform: transformed /y_2011, 511455/511455
[2017-04-07 16:31:25.253249] INFO: transform: transformed /y_2012, 565675/565675
[2017-04-07 16:31:25.748764] INFO: transform: transformed /y_2013, 564168/564168
[2017-04-07 16:31:26.312197] INFO: transform: transformed /y_2014, 569948/569948
[2017-04-07 16:31:26.848106] INFO: transform: transformed /y_2015, 569698/569698
[2017-04-07 16:31:27.366426] INFO: transform: transformed /y_2016, 641546/641546
[2017-04-07 16:31:27.415311] INFO: transform: transformed /y_2017, 0/0
[2017-04-07 16:31:27.449717] INFO: transform: transformed rows: 3854057/3854057
[2017-04-07 16:31:27.452958] INFO: bigquant: transform.v2 end [3.638299s].
[2017-04-07 16:31:27.457612] INFO: bigquant: join.v2 start ..
[2017-04-07 16:31:36.621361] INFO: filter: /y_2010, rows=431027/431567, timetaken=6.63244s
[2017-04-07 16:31:43.629029] INFO: filter: /y_2011, rows=510919/511455, timetaken=6.938455s
[2017-04-07 16:31:50.770157] INFO: filter: /y_2012, rows=564579/565675, timetaken=7.055953s
[2017-04-07 16:31:58.022491] INFO: filter: /y_2013, rows=563133/564168, timetaken=7.143753s
[2017-04-07 16:32:04.713014] INFO: filter: /y_2014, rows=567871/569948, timetaken=6.573261s
[2017-04-07 16:32:08.627096] INFO: filter: /y_2015, rows=560419/569698, timetaken=3.813008s
[2017-04-07 16:32:12.745423] INFO: filter: /y_2016, rows=619554/641546, timetaken=4.10009s
[2017-04-07 16:32:12.903417] INFO: filter: total result rows: 3817502
[2017-04-07 16:32:12.906550] INFO: bigquant: join.v2 end [45.448919s].
[2017-04-07 16:32:12.911028] INFO: bigquant: filter.v2 start ..
[2017-04-07 16:32:12.916261] INFO: filter: filter with expr date < "2015-01-01"
[2017-04-07 16:32:13.438584] INFO: filter: filter /y_2010, 431027/431027
[2017-04-07 16:32:13.833259] INFO: filter: filter /y_2011, 510919/510919
[2017-04-07 16:32:14.316030] INFO: filter: filter /y_2012, 564579/564579
[2017-04-07 16:32:14.796145] INFO: filter: filter /y_2013, 563133/563133
[2017-04-07 16:32:15.281421] INFO: filter: filter /y_2014, 567871/567871
[2017-04-07 16:32:15.481699] INFO: filter: filter /y_2015, 0/560419
[2017-04-07 16:32:15.686132] INFO: filter: filter /y_2016, 0/619554
[2017-04-07 16:32:15.704898] INFO: bigquant: filter.v2 end [2.793808s].
[2017-04-07 16:32:15.709082] INFO: bigquant: filter.v2 start ..
[2017-04-07 16:32:15.714137] INFO: filter: filter with expr "2015-01-01" <= date
[2017-04-07 16:32:15.980858] INFO: filter: filter /y_2010, 0/431027
[2017-04-07 16:32:16.157762] INFO: filter: filter /y_2011, 0/510919
[2017-04-07 16:32:16.365527] INFO: filter: filter /y_2012, 0/564579
[2017-04-07 16:32:16.545553] INFO: filter: filter /y_2013, 0/563133
[2017-04-07 16:32:16.729569] INFO: filter: filter /y_2014, 0/567871
[2017-04-07 16:32:17.203997] INFO: filter: filter /y_2015, 560419/560419
[2017-04-07 16:32:17.700144] INFO: filter: filter /y_2016, 619554/619554
[2017-04-07 16:32:17.736688] INFO: bigquant: filter.v2 end [2.027571s].
[2017-04-07 16:32:17.741145] INFO: bigquant: stock_ranker_train.v2 start ..
[2017-04-07 16:32:19.615760] INFO: df2bin: prepare data: training ..
[2017-04-07 16:33:16.958740] INFO: stock_ranker_train: training: 2637529 rows
[2017-04-07 16:35:37.283157] INFO: bigquant: stock_ranker_train.v2 end [199.541451s].
[2017-04-07 16:35:37.287880] INFO: bigquant: stock_ranker_predict.v2 start ..
[2017-04-07 16:35:38.033690] INFO: df2bin: prepare data: prediction ..
[2017-04-07 16:36:03.517233] INFO: stock_ranker_predict: prediction: 1179973 rows
[2017-04-07 16:36:12.650083] INFO: bigquant: stock_ranker_predict.v2 end [35.362146s].
[2017-04-07 16:36:12.666276] INFO: bigquant: backtest.v5 start ..
/var/app/enabled/pandas/tseries/index.py:817: PerformanceWarning: Non-vectorized DateOffset being applied to Series or DatetimeIndex
  "or DatetimeIndex", PerformanceWarning)
/var/app/enabled/empyrical/stats.py:534: RuntimeWarning: divide by zero encountered in double_scalars
  sortino = mu / dsr
[2017-04-07 16:38:10.177819] INFO: Performance: Simulated 482 trading days out of 482.
[2017-04-07 16:38:10.180119] INFO: Performance: first open: 2015-01-05 14:30:00+00:00
[2017-04-07 16:38:10.181561] INFO: Performance: last close: 2016-12-22 20:00:00+00:00
/var/app/enabled/pandas/core/generic.py:1138: PerformanceWarning: 
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->mixed,key->block4_values] [items->['orders', 'period_label', 'positions', 'transactions']]

  return pytables.to_hdf(path_or_buf, key, self, **kwargs)
[2017-04-07 16:38:11.404819] INFO: bigquant: ir base: 0.21582569774338758
  • Total return: 108.53%
  • Annualized return: 46.85%
  • Benchmark return: -5.6%
  • Alpha: 0.37
  • Beta: 1.09
  • Sharpe ratio: 2.47
  • Return volatility: 40.97%
  • Information ratio: 4.97
  • Max drawdown: 44.42%
  • Winning/losing trades: 1241/885
  • Win/loss profit rate: +5.95%/-5.54%
[2017-04-07 16:38:12.313342] INFO: bigquant: backtest.v5 end [119.647031s].
In [4]:
m6.feature_gains.read_df()
Out[4]:
       feature         gain
0  list_days_0  2143.952656
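
To eyeball what the model actually ranks, the prediction data can be loaded the same way the backtest does. This is a sketch: read_df() and the date/instrument columns are used in handle_data above (rows within a day are ordered by predicted rank, which the buy logic relies on), but this interactive inspection is not part of the original strategy, and 2016-12-22 is simply the last trading day in the log.
In [ ]:
df = m7.predictions.read_df()
# top 5 ranked instruments on the last evaluation day
df[df.date == '2016-12-22'].head(5)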