Clone Strategy
In [1]:
import math  # needed for math.log in initialize() below

# Basic parameter configuration
class conf:
    start_date = '2010-01-01'
    end_date='2017-01-01'
    # Data before split_date is used for training; data after it is used for evaluation
    split_date = '2015-01-01'
    # D.instruments: https://bigquant.com/docs/data_instruments.html
    instruments = D.instruments(start_date, split_date)

    # Labeling function for the machine-learning target
    # The labeling expression below is equivalent to min(max(holding-period return * 100, -20), 20) + 20
    # (M.fast_auto_labeler later rounds the score to an integer)
    # Note: max/min clip the score to [-20, 20]; adding 20 makes it non-negative
    # (StockRanker requires non-negative integer labels; see the plain-Python sketch after this class)
    label_expr = ['return * 100', 'where(label > {0}, {0}, where(label < -{0}, -{0}, label)) + {0}'.format(20)]
    # Holding period in days, used to compute the return in label_expr
    hold_days = 5

    # Features: https://bigquant.com/docs/data_features.html; you can construct any feature with an expression
    features = [
        'return_5',  # 5-day return
        'return_10',  # 10-day return
        'rank_return_10',  # 10-day return rank
        'rank_return_20',  # 20-day return rank
        'avg_amount_20',  # 20-day average trading value
        'avg_turn_20',  # 20-day average turnover rate
        'avg_turn_10',  # 10-day average turnover rate
        'fs_common_equity_0',  # total common equity
        'market_cap_0',  # total market capitalization
        'pe_ttm_0',  # P/E ratio (TTM)
        'rank_pe_ttm_0',  # P/E ratio (TTM) rank
        'fs_net_profit_yoy_0',  # YoY growth of net profit attributable to parent-company shareholders
        'fs_net_profit_qoq_0',  # QoQ growth of single-quarter net profit attributable to parent-company shareholders
        'list_days_0',  # days since listing
        'list_board_0',  # listing board
    ]
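
# For illustration only: a plain-Python sketch of the label math described in
# conf.label_expr (a hypothetical helper, not part of the platform API).
# The holding-period return is scaled by 100, clipped to [-20, 20], then
# shifted by +20, so the score always lies in [0, 40]; e.g. a +25% holding
# return maps to 40 and a -50% return maps to 0.
def _illustrate_label_score(holding_return, clip=20):
    return min(max(holding_return * 100, -clip), clip) + clip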

# Label the data: score each row (sample); a higher score generally means better
m1 = M.fast_auto_labeler.v8(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.split_date,
    label_expr=conf.label_expr, hold_days=conf.hold_days,
    benchmark='000300.SHA', sell_at='open', buy_at='open', is_regression=True)
# Compute the feature data
m2 = M.general_feature_extractor.v5(
    instruments=conf.instruments, start_date=conf.start_date, end_date=conf.split_date,
    features=conf.features)

m3 = M.add_columns.v1(data=m2.data, eval_list=conf.features)

# Data preprocessing: handle missing data and normalize (a plain-pandas sketch of this step follows below)
m4 = M.transform.v2(
    data=m3.data, transforms=None,
    drop_null=True, astype='int32', except_columns=['date', 'instrument'],
    clip_lower=-200000000, clip_upper=200000000)
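
# For illustration only: roughly what the transform step above is configured to
# do, expressed in plain pandas (a sketch under assumptions, not the module's
# actual implementation): drop rows with nulls, clip extreme values and cast
# the feature columns to int32, leaving 'date' and 'instrument' untouched.
def _illustrate_transform(df):
    df = df.dropna()
    feature_cols = [c for c in df.columns if c not in ('date', 'instrument')]
    df[feature_cols] = df[feature_cols].clip(-200000000, 200000000).astype('int32')
    return df
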
# Join the label data and the feature data
m5 = M.join.v2(data1=m1.data, data2=m4.data, on=['date', 'instrument'], sort=True)
# Train a random forest
m6 = M.random_forest_train.v1(training_ds=m5.data, features=conf.features, is_regression=True)


## Quantitative backtest https://bigquant.com/docs/module_trade.html
# Backtest engine: prepare data, executed only once
def prepare(context):
    # context.start_date / end_date: passed as parameters to the trader during backtest; replaced by live dates when running live
    n1 = M.general_feature_extractor.v5(
        instruments=D.instruments(),
        start_date=context.start_date, end_date=context.end_date,features=conf.features)
    n2 = M.add_columns.v1(data=n1.data, eval_list=conf.features)
    n3 = M.transform.v2(
        data=n2.data, transforms=None,
        drop_null=True, astype='int32', except_columns=['date', 'instrument'],
        clip_lower=-200000000, clip_upper=200000000)
    n4 = M.random_forest_predict.v1(model=context.options['model_id'], data=n3.data)
    context.instruments = n4.instruments
    context.options['predictions'] = n4.predictions

# Backtest engine: initialization function, executed only once
def initialize(context):
    # Load the prediction data (passed in via options; read_df loads it into memory as a DataFrame)
    context.ranker_prediction = context.options['predictions'].read_df()

    # The system already sets default commission and slippage; to change the commission, use the call below
    context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
    # Number of stocks to buy: the top 5 from the predicted ranking
    stock_count = 5
    # Per-stock weights; this scheme gives higher-ranked stocks a bit more cash,
    # e.g. [0.339160, 0.213986, 0.169580, ...] (see the sketch after this function)
    context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of the portfolio a single stock may occupy
    context.max_cash_per_instrument = 0.2
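
# For illustration only: a plain-Python sketch of the weight scheme above,
# assuming T.norm simply rescales a list so it sums to 1 (an assumption, not
# a documented guarantee). With stock_count = 5 the normalized weights are
# roughly [0.339, 0.214, 0.170, 0.146, 0.131], so the top-ranked stock gets
# about one third of the daily buy budget.
_raw_weights = [1 / math.log(i + 2) for i in range(5)]
_example_weights = [w / sum(_raw_weights) for w in _raw_weights]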

# Backtest engine: daily handler, executed once per trading day
def handle_data(context, data):
    # Filter by date to get today's predictions
    ranker_prediction = context.ranker_prediction[
        context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]

    # 1. Cash allocation
    # The average holding period is hold_days and we buy every day, so each day we expect to spend 1/hold_days of the capital
    # In practice fills are not exact, so during the first hold_days days we spend an equal share per day;
    # after that we spend as much of the remaining cash as possible (capped here at 1.5x the equal share).
    # A worked example follows after handle_data.
    is_staging = context.trading_day_index < context.options['hold_days']  # are we still in the position-building phase (first hold_days days)?
    cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
    cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
    positions = {e.symbol: p.amount * p.last_sale_price
                 for e, p in context.perf_tracker.position_tracker.positions.items()}

    # 2. Generate sell orders: selling starts only after the first hold_days days; among current holdings, the ones ranked lowest by the model's predictions are dropped first
    if not is_staging and cash_for_sell > 0:
        equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
        instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
                lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
        # print('rank order for sell %s' % instruments)
        for instrument in instruments:
            context.order_target(context.symbol(instrument), 0)
            cash_for_sell -= positions[instrument]
            if cash_for_sell <= 0:
                break

    # 3. Generate buy orders: buy the top stock_count stocks by the model's predicted ranking
    buy_cash_weights = context.stock_weights
    buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
    max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
    for i, instrument in enumerate(buy_instruments):
        cash = cash_for_buy * buy_cash_weights[i]
        if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Make sure a single position never exceeds the per-stock cash cap
            cash = max_cash_per_instrument - positions.get(instrument, 0)
        if cash > 0:
            context.order_value(context.symbol(instrument), cash)
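
# For illustration only: a worked example of the cash-allocation rule in
# handle_data (a hypothetical helper, never called by the engine). With
# portfolio_value = 1,000,000, hold_days = 5 and cash = 300,000 after the
# staging phase: cash_avg = 200,000, cash_for_buy = min(300,000, 1.5 * 200,000)
# = 300,000 and cash_for_sell = 200,000 - (300,000 - 300,000) = 200,000.
def _illustrate_cash_split(portfolio_value, cash, hold_days, is_staging):
    cash_avg = portfolio_value / hold_days
    cash_for_buy = min(cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (cash - cash_for_buy)
    return cash_for_buy, cash_for_sell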


# Run the trading engine
m7 = M.trade.v2(
    instruments=None,
    start_date=conf.split_date,
    end_date=conf.end_date,
    prepare=prepare,
    initialize=initialize,
    handle_data=handle_data,
    order_price_field_buy='open',       # buy at the open price
    order_price_field_sell='close',     # sell at the close price
    capital_base=1000000,               # initial capital
    benchmark='000300.SHA',             # benchmark for comparison only; does not affect backtest results
    # Pass the prediction data and parameters to the backtest engine via options
    options={'hold_days': conf.hold_days, 'model_id': m6.model}
)
[2017-08-24 21:32:08.708201] INFO: bigquant: fast_auto_labeler.v8 start ..
[2017-08-24 21:32:08.716144] INFO: bigquant: hit cache
[2017-08-24 21:32:08.733901] INFO: bigquant: fast_auto_labeler.v8 end [0.025622s].
[2017-08-24 21:32:08.786621] INFO: bigquant: general_feature_extractor.v5 start ..
[2017-08-24 21:32:27.416542] INFO: general_feature_extractor: year 2010, featurerows=431567
[2017-08-24 21:32:44.373677] INFO: general_feature_extractor: year 2011, featurerows=511455
[2017-08-24 21:33:07.972706] INFO: general_feature_extractor: year 2012, featurerows=565675
[2017-08-24 21:33:36.550363] INFO: general_feature_extractor: year 2013, featurerows=564168
[2017-08-24 21:34:02.250235] INFO: general_feature_extractor: year 2014, featurerows=569948
[2017-08-24 21:34:18.098771] INFO: general_feature_extractor: year 2015, featurerows=0
[2017-08-24 21:34:18.116152] INFO: general_feature_extractor: total feature rows: 2642813
[2017-08-24 21:34:18.118101] INFO: bigquant: general_feature_extractor.v5 end [129.331494s].
[2017-08-24 21:34:18.128043] INFO: bigquant: add_columns.v1 start ..
[2017-08-24 21:34:19.195304] INFO: add_columns: add columns for /y_2010
[2017-08-24 21:34:20.297450] INFO: add_columns: add columns for /y_2011
[2017-08-24 21:34:21.563310] INFO: add_columns: add columns for /y_2012
[2017-08-24 21:34:22.292253] INFO: add_columns: add columns for /y_2013
[2017-08-24 21:34:23.844560] INFO: add_columns: add columns for /y_2014
[2017-08-24 21:34:23.880413] INFO: bigquant: add_columns.v1 end [5.752376s].
[2017-08-24 21:34:23.891063] INFO: bigquant: transform.v2 start ..
[2017-08-24 21:34:26.093803] INFO: transform: transformed /y_2010, 406144/431567
[2017-08-24 21:34:28.441137] INFO: transform: transformed /y_2011, 488290/511455
[2017-08-24 21:34:31.641964] INFO: transform: transformed /y_2012, 550884/565675
[2017-08-24 21:34:34.837926] INFO: transform: transformed /y_2013, 560482/564168
[2017-08-24 21:34:38.127771] INFO: transform: transformed /y_2014, 561385/569948
[2017-08-24 21:34:38.161254] INFO: transform: transformed rows: 2567185/2642813
[2017-08-24 21:34:38.184574] INFO: bigquant: transform.v2 end [14.293467s].
[2017-08-24 21:34:38.195779] INFO: bigquant: join.v2 start ..
[2017-08-24 21:34:52.108654] INFO: join: /y_2010, rows=405633/406144, timetaken=8.379036s
[2017-08-24 21:35:01.615982] INFO: join: /y_2011, rows=487782/488290, timetaken=9.473107s
[2017-08-24 21:35:11.633710] INFO: join: /y_2012, rows=549815/550884, timetaken=9.981263s
[2017-08-24 21:35:21.588594] INFO: join: /y_2013, rows=559468/560482, timetaken=9.911853s
[2017-08-24 21:35:30.697995] INFO: join: /y_2014, rows=544933/561385, timetaken=9.06286s
[2017-08-24 21:35:30.840972] INFO: join: total result rows: 2547631
[2017-08-24 21:35:30.842758] INFO: bigquant: join.v2 end [52.647026s].
[2017-08-24 21:35:31.145574] INFO: bigquant: random_forest_train.v1 start ..
[2017-08-24 21:39:13.367450] INFO: random_forest_train: model train data score:0.06
[2017-08-24 21:39:13.395739] INFO: bigquant: random_forest_train.v1 end [222.250158s].
[2017-08-24 21:39:13.439547] INFO: bigquant: backtest.v7 start ..
[2017-08-24 21:39:13.639220] INFO: bigquant: general_feature_extractor.v5 start ..
[2017-08-24 21:39:26.532446] INFO: general_feature_extractor: year 2015, featurerows=569698
[2017-08-24 21:39:51.068248] INFO: general_feature_extractor: year 2016, featurerows=641546
[2017-08-24 21:40:03.501360] INFO: general_feature_extractor: year 2017, featurerows=0
[2017-08-24 21:40:03.518657] INFO: general_feature_extractor: total feature rows: 1211244
[2017-08-24 21:40:03.520454] INFO: bigquant: general_feature_extractor.v5 end [49.88126s].
[2017-08-24 21:40:03.526473] INFO: bigquant: add_columns.v1 start ..
[2017-08-24 21:40:04.422096] INFO: add_columns: add columns for /y_2015
[2017-08-24 21:40:05.431012] INFO: add_columns: add columns for /y_2016
[2017-08-24 21:40:05.481537] INFO: bigquant: add_columns.v1 end [1.954997s].
[2017-08-24 21:40:05.488685] INFO: bigquant: transform.v2 start ..
[2017-08-24 21:40:08.266235] INFO: transform: transformed /y_2015, 555361/569698
[2017-08-24 21:40:11.780707] INFO: transform: transformed /y_2016, 629999/641546
[2017-08-24 21:40:11.803440] INFO: transform: transformed rows: 1185360/1211244
[2017-08-24 21:40:11.830598] INFO: bigquant: transform.v2 end [6.341889s].
[2017-08-24 21:40:11.841701] INFO: bigquant: random_forest_predict.v1 start ..
[2017-08-24 21:40:18.091140] INFO: bigquant: random_forest_predict.v1 end [6.249434s].
[2017-08-24 21:40:51.838060] INFO: Performance: Simulated 488 trading days out of 488.
[2017-08-24 21:40:51.839273] INFO: Performance: first open: 2015-01-05 14:30:00+00:00
[2017-08-24 21:40:51.840425] INFO: Performance: last close: 2016-12-30 20:00:00+00:00
[Note] 2 sell orders were filled over multiple days. This happens when the amount sold in one day exceeds 2.5% of that stock's trading volume for the day.
  • Return: 194.1%
  • Annualized return: 74.55%
  • Benchmark return: -6.33%
  • Alpha: 0.77
  • Beta: 0.92
  • Sharpe ratio: 1.76
  • Return volatility: 40.51%
  • Information ratio: 2.77
  • Max drawdown: 43.57%
[2017-08-24 21:40:53.797163] INFO: bigquant: backtest.v7 end [100.357593s].