TabNet: Attentive Interpretable Tabular Learning
A quantitative stock-selection scheme based on the TabNet model. 98 price-volume factors were extracted, a TabNet model was trained on data from 2010 to 2018, and the model's predictions were backtested on data from 2018 through September 2021. (Note that the configuration below differs from that description: 13 factors are enabled in the feature list, the model is trained on data from 2018-01-01 to 2021-12-31 for a fixed list of 25 Shanghai-listed stocks, and the backtest covers 2022-01-01 to 2023-04-30.)
TabNet core parameters
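The TabNet hyperparameters are set in the dl_models_tabnet_train module (m18) in the strategy code below: input_dim is the number of input factors, n_d and n_a are the widths of the decision and attention representations, n_steps is the number of sequential attention steps, gamma controls how strongly a feature may be reused across steps, and momentum is the (ghost) batch-norm momentum used together with virtual_batch_size. As a rough standalone reference only, the sketch below reproduces the same configuration with the open-source pytorch-tabnet package; this is an assumption for illustration, since the platform module may wrap a different implementation.

# Minimal standalone sketch of the TabNet configuration used in m18 below, written against the
# open-source pytorch-tabnet package (an assumption; BigQuant's dl_models_tabnet_train module may
# be implemented differently). Toy random data stands in for the 13 enabled factors.
import numpy as np
from pytorch_tabnet.tab_model import TabNetRegressor

rng = np.random.default_rng(2021)
X_train, y_train = rng.normal(size=(20000, 13)), rng.normal(size=(20000, 1))
X_val, y_val = rng.normal(size=(4000, 13)), rng.normal(size=(4000, 1))

model = TabNetRegressor(
    n_d=32,          # width of the decision representation
    n_a=32,          # width of the attention representation
    n_steps=3,       # number of sequential attention steps
    gamma=1.3,       # feature-reuse relaxation across steps
    momentum=0.02,   # ghost batch-norm momentum
)
model.fit(
    X_train, y_train,
    eval_set=[(X_val, y_val)],
    max_epochs=100,
    batch_size=5120,
    virtual_batch_size=512,
    num_workers=0,   # m18 uses 4; kept at 0 here so the sketch runs anywhere
)
pred = model.predict(X_val)  # shape (n_samples, 1), analogous to the output of m19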
# This code was automatically generated by the visual strategy environment on 2023-05-25 18:13
# This code cell can only be edited in visual mode. You can also copy the code, paste it into a new code cell or strategy, and then modify it.
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m12_run_bigquant_run(input_1, input_2, input_3):
    # Sample code; write your own code here.
    # Split the binned feature data into a training set and a validation set.
    from sklearn.model_selection import train_test_split
    data = input_1.read()
    x_train, x_val, y_train, y_val = train_test_split(data['x'], data['y'], random_state=2021)
    data_1 = DataSource.write_pickle({'x': x_train, 'y': y_train.reshape(-1, 1)})
    data_2 = DataSource.write_pickle({'x': x_val, 'y': y_val.reshape(-1, 1)})
    return Outputs(data_1=data_1, data_2=data_2, data_3=None)

# Post-processing function, optional. Its input is the main function's output; you can transform the data here or return a friendlier outputs format. The output of this function is not cached.
def m12_post_run_bigquant_run(outputs):
    return outputs
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m20_run_bigquant_run(input_1, input_2, input_3):
    # Sample code; write your own code here.
    # Build the prediction table: one row per (date, instrument), sorted by date and by descending predicted label.
    import pandas as pd
    pred_label = input_1.read_pickle()
    df = input_2.read_df()
    df = pd.DataFrame({'pred_label': pred_label[:, 0], 'instrument': df.instrument, 'date': df.date})
    df.sort_values(['date', 'pred_label'], inplace=True, ascending=[True, False])
    return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)

# Post-processing function, optional. Its input is the main function's output; you can transform the data here or return a friendlier outputs format. The output of this function is not cached.
def m20_post_run_bigquant_run(outputs):
    return outputs
# Backtest engine: initialization function, executed only once
def m21_initialize_bigquant_run(context):
    # Load the prediction data (passed in through options; read_df loads it into memory as a DataFrame)
    context.ranker_prediction = context.options['data'].read_df()
    # The system sets default commissions and slippage; use the function below to change the commission
    context.set_commission(PerOrder(buy_cost=0.001, sell_cost=0.001, min_cost=5))
    # Number of stocks to buy: the top 5 names of the predicted ranking
    stock_count = 5
    # Per-stock weights; this scheme gives higher-ranked stocks somewhat more capital, e.g. [0.339160, 0.213986, 0.169580, ..]
    context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
    # Maximum fraction of capital that a single stock may occupy
    context.max_cash_per_instrument = 0.2
    context.options['hold_days'] = 5
# Backtest engine: daily data-handling function, executed once per trading day
def m21_handle_data_bigquant_run(context, data):
    # Filter by date to get today's prediction data
    ranker_prediction = context.ranker_prediction[
        context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]

    # 1. Capital allocation
    # The average holding period is hold_days and stocks are bought every day, so about 1/hold_days of the capital is expected to be used each day.
    # In practice there is some buying slippage, so an equal share of capital is used during the first hold_days days; after that, as much of the remaining cash as possible is used (capped here at 1.5x the equal share).
    is_staging = context.trading_day_index < context.options['hold_days']  # whether we are still in the position-building phase (the first hold_days days)
    cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
    cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
    cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
    positions = {e.symbol: p.amount * p.last_sale_price
                 for e, p in context.perf_tracker.position_tracker.positions.items()}

    # 2. Generate sell orders: selling starts only after the first hold_days days; current holdings at the bottom of the model's ranking are eliminated first
    if not is_staging and cash_for_sell > 0:
        equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
        instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
            lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
        # print('rank order for sell %s' % instruments)
        for instrument in instruments:
            context.order_target(context.symbol(instrument), 0)
            cash_for_sell -= positions[instrument]
            if cash_for_sell <= 0:
                break

    # 3. Generate buy orders: buy the top stock_count stocks of the model's predicted ranking
    buy_cash_weights = context.stock_weights
    buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
    max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
    for i, instrument in enumerate(buy_instruments):
        cash = cash_for_buy * buy_cash_weights[i]
        if cash > max_cash_per_instrument - positions.get(instrument, 0):
            # Make sure a single position never exceeds the per-stock capital cap
            cash = max_cash_per_instrument - positions.get(instrument, 0)
        if cash > 0:
            context.order_value(context.symbol(instrument), cash)
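# A worked example of the capital split above (illustrative numbers only): with hold_days=5 and a
# portfolio value of 1,000,000 (capital_base in the trade module below), cash_avg = 200,000. During
# the first 5 trading days (is_staging=True) at most 200,000 of cash is deployed per day; afterwards
# up to 1.5 * 200,000 = 300,000 of idle cash may be spent, and cash_for_sell measures how much still
# has to be freed by selling the lowest-ranked holdings to reach that daily budget.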
# Backtest engine: data preparation function, executed only once
def m21_prepare_bigquant_run(context):
    pass
m1 = M.instruments.v2(
start_date='2018-01-01',
end_date='2021-12-31',
market='CN_STOCK_A',
instrument_list="""600009.SHA
600016.SHA
600028.SHA
600031.SHA
600085.SHA
600111.SHA
600276.SHA
600362.SHA
600383.SHA
600426.SHA
600519.SHA
600585.SHA
600600.SHA
600660.SHA
600837.SHA
601006.SHA
601328.SHA
601398.SHA
601601.SHA
601628.SHA
601668.SHA
601808.SHA
601857.SHA
601898.SHA
601939.SHA""",
max_count=0
)
m2 = M.advanced_auto_labeler.v2(
instruments=m1.data,
    label_expr="""# Lines starting with # are comments
# 0. One expression per line, executed in order; from the second expression on, the field label can be used
# 1. Available data fields: https://bigquant.com/docs/data_history_data.html
#    Adding the benchmark_ prefix gives access to the corresponding benchmark data
# 2. Available operators and functions: `expression engine <https://bigquant.com/docs/big_expr.html>`_
# Compute the return: the close 5 days ahead (sell price) divided by the next day's open (buy price)
shift(close, -5) / shift(open, -1)-1
# Outlier handling: clip at the 1% and 99% quantiles
clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# Filter out locked-limit (one-price) days: set the label to NaN; NaN labels are ignored in later processing and training
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
start_date='',
end_date='',
benchmark='000300.SHA',
drop_na_label=True,
cast_label_int=False
)
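# The label expression above, read in pandas terms (a minimal sketch that assumes a single-instrument
# DataFrame `df` with open/high/low/close columns; the real computation is done for the whole panel by
# the expression engine inside advanced_auto_labeler):
#     raw = df['close'].shift(-5) / df['open'].shift(-1) - 1                # 5-day return, buying at the next open
#     label = raw.clip(raw.quantile(0.01), raw.quantile(0.99))              # winsorize at the 1%/99% quantiles
#     label[df['high'].shift(-1) == df['low'].shift(-1)] = float('nan')     # drop locked-limit (one-price) days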
m17 = M.standardlize.v8(
input_1=m2.data,
columns_input='label'
)
m3 = M.input_features.v1(
features="""return_5
return_10
return_20
avg_amount_0/avg_amount_5
avg_amount_5/avg_amount_20
rank_avg_amount_0/rank_avg_amount_5
rank_avg_amount_5/rank_avg_amount_10
rank_return_0
rank_return_5
rank_return_10
rank_return_0/rank_return_5
rank_return_5/rank_return_10
pe_ttm_0
# close_0
# open_0
# high_0
# low_0
# amount_0
# turn_0
# return_0
# close_1
# open_1
# high_1
# low_1
# return_1
# amount_1
# turn_1
# close_2
# open_2
# high_2
# low_2
# amount_2
# turn_2
# return_2
# close_3
# open_3
# high_3
# low_3
# amount_3
# turn_3
# return_3
# close_4
# open_4
# high_4
# low_4
# amount_4
# turn_4
# return_4
# mean(close_0, 5)
# mean(low_0, 5)
# mean(open_0, 5)
# mean(high_0, 5)
# mean(turn_0, 5)
# mean(amount_0, 5)
# mean(return_0, 5)
# # ts_max(close_0, 5)
# # ts_max(low_0, 5)
# # ts_max(open_0, 5)
# # ts_max(high_0, 5)
# # ts_max(turn_0, 5)
# # ts_max(amount_0, 5)
# # ts_max(return_0, 5)
# # ts_min(close_0, 5)
# # ts_min(low_0, 5)
# # ts_min(open_0, 5)
# # ts_min(high_0, 5)
# # ts_min(turn_0, 5)
# # ts_min(amount_0, 5)
# # ts_min(return_0, 5)
# std(close_0, 5)
# std(low_0, 5)
# std(open_0, 5)
# std(high_0, 5)
# std(turn_0, 5)
# std(amount_0, 5)
# std(return_0, 5)
# ts_rank(close_0, 5)
# ts_rank(low_0, 5)
# ts_rank(open_0, 5)
# ts_rank(high_0, 5)
# ts_rank(turn_0, 5)
# ts_rank(amount_0, 5)
# ts_rank(return_0, 5)
# decay_linear(close_0, 5)
# decay_linear(low_0, 5)
# decay_linear(open_0, 5)
# decay_linear(high_0, 5)
# decay_linear(turn_0, 5)
# decay_linear(amount_0, 5)
# decay_linear(return_0, 5)
# correlation(volume_0, return_0, 5)
# correlation(volume_0, high_0, 5)
# correlation(volume_0, low_0, 5)
# correlation(volume_0, close_0, 5)
# correlation(volume_0, open_0, 5)
# correlation(volume_0, turn_0, 5)
# # correlation(return_0, high_0, 5)
# # correlation(return_0, low_0, 5)
# # correlation(return_0, close_0, 5)
# # correlation(return_0, open_0, 5)
# # correlation(return_0, turn_0, 5)
# # correlation(high_0, low_0, 5)
# # correlation(high_0, close_0, 5)
# # correlation(high_0, open_0, 5)
# # correlation(high_0, turn_0, 5)
# # correlation(low_0, close_0, 5)
# # correlation(low_0, open_0, 5)
# # correlation(low_0, turn_0, 5)
# # correlation(close_0, open_0, 5)
# # correlation(close_0, turn_0, 5)
# # correlation(open_0, turn_0, 5)"""
)
m6 = M.general_feature_extractor.v7(
instruments=m1.data,
features=m3.data,
start_date='',
end_date='',
before_start_days=10
)
m7 = M.derived_feature_extractor.v3(
input_data=m6.data,
features=m3.data,
date_col='date',
instrument_col='instrument',
drop_na=True,
remove_extra_columns=False
)
m14 = M.fillnan.v1(
input_data=m7.data,
features=m3.data,
fill_value='0.0'
)
m4 = M.join.v3(
data1=m17.data,
data2=m14.data,
on='date,instrument',
how='inner',
sort=False
)
m10 = M.dl_convert_to_bin.v2(
input_data=m4.data,
features=m3.data,
window_size=1,
feature_clip=3,
flatten=True,
window_along_col='instrument'
)
m12 = M.cached.v3(
input_1=m10.data,
run=m12_run_bigquant_run,
post_run=m12_post_run_bigquant_run,
input_ports='',
params='{}',
output_ports=''
)
m18 = M.dl_models_tabnet_train.v1(
training_data=m12.data_1,
validation_data=m12.data_2,
input_dim=13,
n_steps=3,
n_d=32,
n_a=32,
gamma=1.3,
momentum=0.02,
batch_size=5120,
virtual_batch_size=512,
epochs=100,
num_workers=4,
device_name='auto:自动调用GPU',
verbose='1:输出进度条记录'
)
m5 = M.instruments.v2(
start_date='2022-01-01',
end_date='2023-04-30',
market='CN_STOCK_A',
instrument_list="""600009.SHA
600016.SHA
600028.SHA
600031.SHA
600085.SHA
600111.SHA
600276.SHA
600362.SHA
600383.SHA
600426.SHA
600519.SHA
600585.SHA
600600.SHA
600660.SHA
600837.SHA
601006.SHA
601328.SHA
601398.SHA
601601.SHA
601628.SHA
601668.SHA
601808.SHA
601857.SHA
601898.SHA
601939.SHA""",
max_count=0
)
m8 = M.general_feature_extractor.v7(
instruments=m5.data,
features=m3.data,
start_date='',
end_date='',
before_start_days=10
)
m9 = M.derived_feature_extractor.v3(
input_data=m8.data,
features=m3.data,
date_col='date',
instrument_col='instrument',
drop_na=True,
remove_extra_columns=False
)
m15 = M.fillnan.v1(
input_data=m9.data,
features=m3.data,
fill_value='0.0'
)
m11 = M.dl_convert_to_bin.v2(
input_data=m15.data,
features=m3.data,
window_size=1,
feature_clip=3,
flatten=True,
window_along_col='instrument'
)
m19 = M.dl_models_tabnet_predict.v1(
trained_model=m18.data,
input_data=m11.data,
m_cached=False
)
m20 = M.cached.v3(
input_1=m19.data,
input_2=m9.data,
run=m20_run_bigquant_run,
post_run=m20_post_run_bigquant_run,
input_ports='',
params='{}',
output_ports=''
)
m16 = M.concat.v3(
input_data_1=m4.data,
input_data_2=m15.data
)
m21 = M.trade.v4(
instruments=m5.data,
options_data=m20.data_1,
start_date='',
end_date='',
initialize=m21_initialize_bigquant_run,
handle_data=m21_handle_data_bigquant_run,
prepare=m21_prepare_bigquant_run,
volume_limit=0.025,
order_price_field_buy='open',
order_price_field_sell='close',
capital_base=1000000,
auto_cancel_non_tradable_orders=True,
data_frequency='daily',
price_type='后复权',
product_type='股票',
plot_charts=True,
backtest_only=False,
benchmark='000300.SHA'
)
m22 = M.strategy_turn_analysis.v1(
raw_perf=m21.raw_perf
)
[2023-05-25 18:11:33.619019] INFO: moduleinvoker: instruments.v2 开始运行..
[2023-05-25 18:11:33.626420] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.628839] INFO: moduleinvoker: instruments.v2 运行完成[0.009829s].
[2023-05-25 18:11:33.638936] INFO: moduleinvoker: advanced_auto_labeler.v2 开始运行..
[2023-05-25 18:11:33.645926] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.648096] INFO: moduleinvoker: advanced_auto_labeler.v2 运行完成[0.009164s].
[2023-05-25 18:11:33.654339] INFO: moduleinvoker: standardlize.v8 开始运行..
[2023-05-25 18:11:33.660708] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.662381] INFO: moduleinvoker: standardlize.v8 运行完成[0.008041s].
[2023-05-25 18:11:33.667475] INFO: moduleinvoker: input_features.v1 开始运行..
[2023-05-25 18:11:33.673955] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.675610] INFO: moduleinvoker: input_features.v1 运行完成[0.00815s].
[2023-05-25 18:11:33.708479] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2023-05-25 18:11:33.715767] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.717692] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[0.009241s].
[2023-05-25 18:11:33.728504] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2023-05-25 18:11:33.734589] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.736361] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[0.007865s].
[2023-05-25 18:11:33.750881] INFO: moduleinvoker: fillnan.v1 开始运行..
[2023-05-25 18:11:33.757655] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.760127] INFO: moduleinvoker: fillnan.v1 运行完成[0.009256s].
[2023-05-25 18:11:33.771732] INFO: moduleinvoker: join.v3 开始运行..
[2023-05-25 18:11:33.779289] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.783388] INFO: moduleinvoker: join.v3 运行完成[0.011654s].
[2023-05-25 18:11:33.813559] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2023-05-25 18:11:33.821691] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.823784] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.01027s].
[2023-05-25 18:11:33.838020] INFO: moduleinvoker: cached.v3 开始运行..
[2023-05-25 18:11:33.847611] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.849586] INFO: moduleinvoker: cached.v3 运行完成[0.01159s].
[2023-05-25 18:11:33.857096] INFO: moduleinvoker: dl_models_tabnet_train.v1 开始运行..
[2023-05-25 18:11:33.869168] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.871360] INFO: moduleinvoker: dl_models_tabnet_train.v1 运行完成[0.014273s].
[2023-05-25 18:11:33.878817] INFO: moduleinvoker: instruments.v2 开始运行..
[2023-05-25 18:11:33.886538] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.889717] INFO: moduleinvoker: instruments.v2 运行完成[0.0109s].
[2023-05-25 18:11:33.921803] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2023-05-25 18:11:33.928306] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.930307] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[0.008532s].
[2023-05-25 18:11:33.940892] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2023-05-25 18:11:33.948768] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.952692] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[0.011785s].
[2023-05-25 18:11:33.967532] INFO: moduleinvoker: fillnan.v1 开始运行..
[2023-05-25 18:11:33.978332] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:33.981043] INFO: moduleinvoker: fillnan.v1 运行完成[0.013525s].
[2023-05-25 18:11:34.015795] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2023-05-25 18:11:34.024276] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:34.026131] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.010384s].
[2023-05-25 18:11:34.038772] INFO: moduleinvoker: dl_models_tabnet_predict.v1 开始运行..
[2023-05-25 18:11:34.116666] INFO: dl_models_tabnet_pred: 模型预测,样本个数:8190
[2023-05-25 18:11:34.490593] INFO: moduleinvoker: dl_models_tabnet_predict.v1 运行完成[0.45181s].
[2023-05-25 18:11:34.507528] INFO: moduleinvoker: cached.v3 开始运行..
[2023-05-25 18:11:34.792254] INFO: moduleinvoker: cached.v3 运行完成[0.284726s].
[2023-05-25 18:11:34.855621] INFO: moduleinvoker: backtest.v8 开始运行..
[2023-05-25 18:11:34.862019] INFO: backtest: biglearning backtest:V8.6.3
[2023-05-25 18:11:34.863693] INFO: backtest: product_type:stock by specified
[2023-05-25 18:11:34.951864] INFO: moduleinvoker: cached.v2 开始运行..
[2023-05-25 18:11:34.960645] INFO: moduleinvoker: 命中缓存
[2023-05-25 18:11:34.962443] INFO: moduleinvoker: cached.v2 运行完成[0.010594s].
[2023-05-25 18:11:36.780974] INFO: backtest: algo history_data=DataSource(f873e34a75514293b994c3341f52b948T)
[2023-05-25 18:11:36.783122] INFO: algo: TradingAlgorithm V1.8.9
[2023-05-25 18:11:37.211371] INFO: algo: trading transform...
[2023-05-25 18:11:42.918116] INFO: Performance: Simulated 320 trading days out of 320.
[2023-05-25 18:11:42.920620] INFO: Performance: first open: 2022-01-04 09:30:00+00:00
[2023-05-25 18:11:42.923275] INFO: Performance: last close: 2023-04-28 15:00:00+00:00
[2023-05-25 18:11:51.709320] INFO: moduleinvoker: backtest.v8 运行完成[16.853684s].
[2023-05-25 18:11:51.713940] INFO: moduleinvoker: trade.v4 运行完成[16.909584s].
# Output the prediction results
predict_df = m20.data_1.read()
predict_df.head()
predict_df.to_csv("tabnet_predict.csv")
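Because m20_run sorts the table by date and by descending pred_label, the exported CSV can be inspected directly to see the daily buy candidates. A minimal sketch, assuming tabnet_predict.csv was written by the cell above:

import pandas as pd

# Read back the exported predictions and show the top-5 names per date, which is exactly the
# list the trading logic buys (stock_count = 5 in m21_initialize).
pred = pd.read_csv("tabnet_predict.csv", parse_dates=["date"])
daily_top5 = pred.groupby("date").head(5)   # rows are already sorted by descending pred_label within each date
print(daily_top5.head(15))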