TabNet: Attentive Interpretable Tabular Learning
A quantitative stock-selection strategy based on the TabNet model. 98 price/volume factors are extracted; data from 2010 to 2018 is used to train the TabNet model, and the model's predictions are backtested on data from 2018 through September 2021.
TabNet core parameters
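The core TabNet hyperparameters are set on module m18 in the graph below: input_dim=98, n_steps=3, n_d=16, n_a=16, gamma=1.3, momentum=0.01, batch_size=20480, virtual_batch_size=1280, epochs=30. For orientation only, the same configuration expressed with the open-source pytorch-tabnet package would look roughly as follows; this is a minimal sketch under the assumption that the platform's dl_models_tabnet_train module wraps a comparable regressor, with x_train/y_train/x_val/y_val standing for arrays shaped like the m12 split output (y reshaped to (-1, 1)) and x_test a hypothetical out-of-sample feature matrix:

from pytorch_tabnet.tab_model import TabNetRegressor

model = TabNetRegressor(
    n_d=16,          # width of the decision (prediction) layer
    n_a=16,          # width of the attention embedding
    n_steps=3,       # number of sequential attention steps
    gamma=1.3,       # relaxation parameter controlling feature reuse across steps
    momentum=0.01,   # momentum of the (ghost) batch-normalization layers
)
model.fit(
    x_train, y_train,                 # y shaped (n_samples, 1), as produced by the m12 split below
    eval_set=[(x_val, y_val)],
    max_epochs=30,
    batch_size=20480,
    virtual_batch_size=1280,          # ghost batch norm: 16 virtual batches per batch
    num_workers=4,
)
predictions = model.predict(x_test)   # x_test: hypothetical prediction-time features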
# This code was auto-generated by the visual strategy environment on 2022-08-22 13:31
# This code cell can only be edited in visual mode. You can also copy the code, paste it into a new code cell or strategy, and modify it there.
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m12_run_bigquant_run(input_1, input_2, input_3):
# Split the converted training data chronologically into training and validation sets (shuffle=False preserves time order; sklearn's default split is 75% / 25%)
from sklearn.model_selection import train_test_split
data = input_1.read()
x_train, x_val, y_train, y_val = train_test_split(data["x"], data['y'], shuffle=False, random_state=2021)
data_1 = DataSource.write_pickle({'x': x_train, 'y': y_train.reshape(-1, 1)})
data_2 = DataSource.write_pickle({'x': x_val, 'y': y_val.reshape(-1, 1)})
return Outputs(data_1=data_1, data_2=data_2, data_3=None)
# Post-processing function (optional). Its input is the main function's output; you can further process the data here or return a friendlier outputs format. The output of this function is not cached.
def m12_post_run_bigquant_run(outputs):
return outputs
# Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
def m20_run_bigquant_run(input_1, input_2, input_3):
# Attach instrument and date to the model predictions and rank stocks within each day (date ascending, predicted score descending)
pred_label = input_1.read_pickle()
df = input_2.read_df()
df = pd.DataFrame({'pred_label':pred_label[:,0], 'instrument':df.instrument, 'date':df.date})
df.sort_values(['date','pred_label'],inplace=True, ascending=[True,False])
return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)
# Post-processing function (optional). Its input is the main function's output; you can further process the data here or return a friendlier outputs format. The output of this function is not cached.
def m20_post_run_bigquant_run(outputs):
return outputs
# Backtest engine: initialization function, executed only once
def m21_initialize_bigquant_run(context):
# Load the prediction data passed in via options, using read_df to load it into memory as a DataFrame
context.ranker_prediction = context.options['data'].read_df()
# The system already sets a default commission and slippage; to change the commission, use the function below
context.set_commission(PerOrder(buy_cost=0.001, sell_cost=0.001, min_cost=5))
# Number of stocks to buy: the top-ranked stocks from the prediction list (2 here; the example weights below are shown for 5)
stock_count = 2
# Per-stock weights: this scheme gives higher-ranked stocks a slightly larger share of capital, e.g. for 5 stocks: [0.339160, 0.213986, 0.169580, ...]
context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
# Maximum fraction of capital allocated to any single stock
context.max_cash_per_instrument = 0.2
context.options['hold_days'] = 5
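# Illustration (assuming T.norm rescales the weight list to sum to 1):
#   stock_count = 2 -> raw weights [1/ln(2), 1/ln(3)] = [1.4427, 0.9102] -> normalized [0.6132, 0.3868]
#   stock_count = 5 -> normalized [0.3392, 0.2140, 0.1696, 0.1461, 0.1312], matching the example values above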
# Backtest engine: daily data-processing function, executed once per trading day
def m21_handle_data_bigquant_run(context, data):
# Filter the predictions down to today's date
ranker_prediction = context.ranker_prediction[
context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]
# 1. Capital allocation
# The average holding period is hold_days; stocks are bought every day, so roughly 1/hold_days of the capital is expected to be deployed per day
# In practice there is some buying slippage, so during the first hold_days days (the ramp-up period) an equal slice of capital is used each day; after that, as much of the remaining cash as possible is used, capped at 1.5x the equal slice (see the worked example after this function)
is_staging = context.trading_day_index < context.options['hold_days']  # still in the ramp-up period (the first hold_days days)?
cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
positions = {e.symbol: p.amount * p.last_sale_price
for e, p in context.perf_tracker.position_tracker.positions.items()}
# 2. Generate sell orders: selling only starts after the first hold_days days; held stocks are sold from the bottom of the model's prediction ranking upwards
if not is_staging and cash_for_sell > 0:
equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
# print('rank order for sell %s' % instruments)
for instrument in instruments:
context.order_target(context.symbol(instrument), 0)
cash_for_sell -= positions[instrument]
if cash_for_sell <= 0:
break
# 3. Generate buy orders: buy the top stock_count stocks in the model's prediction ranking
buy_cash_weights = context.stock_weights
buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
for i, instrument in enumerate(buy_instruments):
cash = cash_for_buy * buy_cash_weights[i]
if cash > max_cash_per_instrument - positions.get(instrument, 0):
# Make sure a single position never exceeds the per-stock capital cap
cash = max_cash_per_instrument - positions.get(instrument, 0)
if cash > 0:
context.order_value(context.symbol(instrument), cash)
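# Worked example of the allocation above (capital_base = 1,000,000, hold_days = 5):
#   cash_avg = 1,000,000 / 5 = 200,000
#   ramp-up (first 5 trading days): buy with at most 200,000 per day
#   afterwards: buy with at most min(available cash, 1.5 * 200,000 = 300,000) per day,
#   while cash_for_sell = cash_avg - (cash - cash_for_buy), i.e. roughly one day's slice (less any idle cash) is freed by selling the lowest-ranked holdings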
# Backtest engine: data-preparation function, executed only once
def m21_prepare_bigquant_run(context):
pass
g = T.Graph({
'm1': 'M.instruments.v2',
'm1.start_date': '2017-01-01',
'm1.end_date': '2019-12-31',
'm1.market': 'CN_STOCK_A',
'm1.instrument_list': '',
'm1.max_count': 0,
'm2': 'M.advanced_auto_labeler.v2',
'm2.instruments': T.Graph.OutputPort('m1.data'),
'm2.label_expr': """# Lines starting with # are comments
# 0. One expression per line, executed in order; from the second line on, the previous result can be referenced via the label field
# 1. Available data fields: https://bigquant.com/docs/data_history_data.html
#    Add the benchmark_ prefix to use the corresponding benchmark data
# 2. Available operators and functions: `expression engine <https://bigquant.com/docs/big_expr.html>`_
# Return: the close 5 days ahead (the sell price) divided by the next day's open (the buy price), minus 1
shift(close, -5) / shift(open, -1)-1
# Winsorize: clip the label at its 1% and 99% quantiles
clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
# Filter out one-bar limit-up days (next day's high equals its low, so the stock cannot be bought); set the label to NaN, which is ignored in later processing and training
where(shift(high, -1) == shift(low, -1), NaN, label)
""",
'm2.start_date': '',
'm2.end_date': '',
'm2.benchmark': '000300.SHA',
'm2.drop_na_label': True,
'm2.cast_label_int': False,
'm17': 'M.standardlize.v8',
'm17.input_1': T.Graph.OutputPort('m2.data'),
'm17.columns_input': 'label',
'm3': 'M.input_features.v1',
'm3.features': """close_0
open_0
high_0
low_0
amount_0
turn_0
return_0
close_1
open_1
high_1
low_1
return_1
amount_1
turn_1
close_2
open_2
high_2
low_2
amount_2
turn_2
return_2
close_3
open_3
high_3
low_3
amount_3
turn_3
return_3
close_4
open_4
high_4
low_4
amount_4
turn_4
return_4
mean(close_0, 5)
mean(low_0, 5)
mean(open_0, 5)
mean(high_0, 5)
mean(turn_0, 5)
mean(amount_0, 5)
mean(return_0, 5)
ts_max(close_0, 5)
ts_max(low_0, 5)
ts_max(open_0, 5)
ts_max(high_0, 5)
ts_max(turn_0, 5)
ts_max(amount_0, 5)
ts_max(return_0, 5)
ts_min(close_0, 5)
ts_min(low_0, 5)
ts_min(open_0, 5)
ts_min(high_0, 5)
ts_min(turn_0, 5)
ts_min(amount_0, 5)
ts_min(return_0, 5)
std(close_0, 5)
std(low_0, 5)
std(open_0, 5)
std(high_0, 5)
std(turn_0, 5)
std(amount_0, 5)
std(return_0, 5)
ts_rank(close_0, 5)
ts_rank(low_0, 5)
ts_rank(open_0, 5)
ts_rank(high_0, 5)
ts_rank(turn_0, 5)
ts_rank(amount_0, 5)
ts_rank(return_0, 5)
decay_linear(close_0, 5)
decay_linear(low_0, 5)
decay_linear(open_0, 5)
decay_linear(high_0, 5)
decay_linear(turn_0, 5)
decay_linear(amount_0, 5)
decay_linear(return_0, 5)
correlation(volume_0, return_0, 5)
correlation(volume_0, high_0, 5)
correlation(volume_0, low_0, 5)
correlation(volume_0, close_0, 5)
correlation(volume_0, open_0, 5)
correlation(volume_0, turn_0, 5)
correlation(return_0, high_0, 5)
correlation(return_0, low_0, 5)
correlation(return_0, close_0, 5)
correlation(return_0, open_0, 5)
correlation(return_0, turn_0, 5)
correlation(high_0, low_0, 5)
correlation(high_0, close_0, 5)
correlation(high_0, open_0, 5)
correlation(high_0, turn_0, 5)
correlation(low_0, close_0, 5)
correlation(low_0, open_0, 5)
correlation(low_0, turn_0, 5)
correlation(close_0, open_0, 5)
correlation(close_0, turn_0, 5)
correlation(open_0, turn_0, 5)""",
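# Feature count check: 7 price/volume fields x 5 lags = 35 raw features,
# 6 rolling operators (mean/ts_max/ts_min/std/ts_rank/decay_linear) x 7 fields = 42,
# plus 21 pairwise 5-day correlations: 35 + 42 + 21 = 98 factors, matching m18.input_dim below.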
'm6': 'M.general_feature_extractor.v7',
'm6.instruments': T.Graph.OutputPort('m1.data'),
'm6.features': T.Graph.OutputPort('m3.data'),
'm6.start_date': '',
'm6.end_date': '',
'm6.before_start_days': 10,
'm7': 'M.derived_feature_extractor.v3',
'm7.input_data': T.Graph.OutputPort('m6.data'),
'm7.features': T.Graph.OutputPort('m3.data'),
'm7.date_col': 'date',
'm7.instrument_col': 'instrument',
'm7.drop_na': True,
'm7.remove_extra_columns': False,
'm13': 'M.standardlize.v8',
'm13.input_1': T.Graph.OutputPort('m7.data'),
'm13.input_2': T.Graph.OutputPort('m3.data'),
'm13.columns_input': '[]',
'm14': 'M.fillnan.v1',
'm14.input_data': T.Graph.OutputPort('m13.data'),
'm14.features': T.Graph.OutputPort('m3.data'),
'm14.fill_value': '0.0',
'm4': 'M.join.v3',
'm4.data1': T.Graph.OutputPort('m17.data'),
'm4.data2': T.Graph.OutputPort('m14.data'),
'm4.on': 'date,instrument',
'm4.how': 'inner',
'm4.sort': False,
'm10': 'M.dl_convert_to_bin.v2',
'm10.input_data': T.Graph.OutputPort('m4.data'),
'm10.features': T.Graph.OutputPort('m3.data'),
'm10.window_size': 1,
'm10.feature_clip': 3,
'm10.flatten': True,
'm10.window_along_col': 'instrument',
'm12': 'M.cached.v3',
'm12.input_1': T.Graph.OutputPort('m10.data'),
'm12.run': m12_run_bigquant_run,
'm12.post_run': m12_post_run_bigquant_run,
'm12.input_ports': '',
'm12.params': '{}',
'm12.output_ports': '',
'm18': 'M.dl_models_tabnet_train.v1',
'm18.training_data': T.Graph.OutputPort('m12.data_1'),
'm18.validation_data': T.Graph.OutputPort('m12.data_2'),
'm18.input_dim': 98,
'm18.n_steps': 3,
'm18.n_d': 16,
'm18.n_a': 16,
'm18.gamma': 1.3,
'm18.momentum': 0.01,
'm18.batch_size': 20480,
'm18.virtual_batch_size': 1280,
'm18.epochs': 30,
'm18.num_workers': 4,
'm18.device_name': 'auto:自动调用GPU',
'm18.verbose': '1:输出进度条记录',
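# Core TabNet hyperparameters above: n_d and n_a are the widths of the decision and attention layers,
# n_steps is the number of sequential attention steps, gamma controls feature reuse across steps,
# and momentum is the batch-normalization momentum. virtual_batch_size enables ghost batch
# normalization: each 20480-sample batch is normalized in 20480 / 1280 = 16 chunks.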
'm5': 'M.instruments.v2',
'm5.start_date': '2020-01-01',
'm5.end_date': '2022-08-22',
'm5.market': 'CN_STOCK_A',
'm5.instrument_list': """
603067.SHA
002579.SZA
600077.SHA
603366.SHA
002723.SZA
603667.SHA
002077.SZA
603398.SHA
002139.SZA
600775.SHA
600783.SHA
600749.SHA
600750.SHA
600782.SHA
600751.SHA
600753.SHA
600754.SHA
600755.SHA
600756.SHA
600757.SHA
600011.SHA
000698.SZA
603680.SHA
002128.SZA
600021.SHA
000593.SZA
002395.SZA
600778.SHA
603956.SHA
002414.SZA
002877.SZA
002527.SZA
002209.SZA
002425.SZA
000065.SZA
002861.SZA
002431.SZA
600818.SHA
600027.SHA
603668.SHA
""",
'm5.max_count': 0,
'm8': 'M.general_feature_extractor.v7',
'm8.instruments': T.Graph.OutputPort('m5.data'),
'm8.features': T.Graph.OutputPort('m3.data'),
'm8.start_date': '',
'm8.end_date': '',
'm8.before_start_days': 10,
'm9': 'M.derived_feature_extractor.v3',
'm9.input_data': T.Graph.OutputPort('m8.data'),
'm9.features': T.Graph.OutputPort('m3.data'),
'm9.date_col': 'date',
'm9.instrument_col': 'instrument',
'm9.drop_na': True,
'm9.remove_extra_columns': False,
'm16': 'M.standardlize.v8',
'm16.input_1': T.Graph.OutputPort('m9.data'),
'm16.input_2': T.Graph.OutputPort('m3.data'),
'm16.columns_input': '[]',
'm15': 'M.fillnan.v1',
'm15.input_data': T.Graph.OutputPort('m16.data'),
'm15.features': T.Graph.OutputPort('m3.data'),
'm15.fill_value': '0.0',
'm11': 'M.dl_convert_to_bin.v2',
'm11.input_data': T.Graph.OutputPort('m15.data'),
'm11.features': T.Graph.OutputPort('m3.data'),
'm11.window_size': 1,
'm11.feature_clip': 3,
'm11.flatten': True,
'm11.window_along_col': 'instrument',
'm19': 'M.dl_models_tabnet_predict.v1',
'm19.trained_model': T.Graph.OutputPort('m18.data'),
'm19.input_data': T.Graph.OutputPort('m11.data'),
'm19.m_cached': False,
'm20': 'M.cached.v3',
'm20.input_1': T.Graph.OutputPort('m19.data'),
'm20.input_2': T.Graph.OutputPort('m9.data'),
'm20.run': m20_run_bigquant_run,
'm20.post_run': m20_post_run_bigquant_run,
'm20.input_ports': '',
'm20.params': '{}',
'm20.output_ports': '',
'm21': 'M.trade.v4',
'm21.instruments': T.Graph.OutputPort('m5.data'),
'm21.options_data': T.Graph.OutputPort('m20.data_1'),
'm21.start_date': '',
'm21.end_date': '',
'm21.initialize': m21_initialize_bigquant_run,
'm21.handle_data': m21_handle_data_bigquant_run,
'm21.prepare': m21_prepare_bigquant_run,
'm21.volume_limit': 0.025,
'm21.order_price_field_buy': 'open',
'm21.order_price_field_sell': 'close',
'm21.capital_base': 1000000,
'm21.auto_cancel_non_tradable_orders': True,
'm21.data_frequency': 'daily',
'm21.price_type': '后复权',
'm21.product_type': '股票',
'm21.plot_charts': True,
'm21.backtest_only': False,
'm21.benchmark': '000300.SHA',
})
# g.run({})
def m23_run_bigquant_run(
bq_graph,
inputs,
trading_days_market='CN', # which market's trading calendar to use, TODO
train_instruments_mid='m1', # module id of the training-data instrument list
test_instruments_mid='m5', # module id of the test-data instrument list
predict_mid='m20', # prediction module id
trade_mid='m21', # backtest module id
start_date='2014-01-01', # data start date
end_date=T.live_run_param('trading_date', '2021-10-08'), # data end date
train_update_days=250, # update period in trading days: retrain every this many trading days
train_update_days_for_live=None, # update period (in trading days) for live/paper-trading mode; set this to use a different model-update period during live simulation
train_data_min_days=250, # minimum amount of training data in trading days, so the first rolling window ends at start_date + train_data_min_days
train_data_max_days=750, # maximum amount of training data in trading days; 0 means no limit, otherwise each rolling window starts at max(window end date - train_data_max_days, start_date)
rolling_count_for_live=1, # number of rolling windows kept in live/paper-trading mode (the last N windows); usually only the last window's model is used, so 1 is enough, but set it higher if your rolling training window is so short that it may contain no training data; 0 means no limit
):
def merge_datasources(input_1):
df_list = [ds[0].read_df().set_index('date').loc[ds[1]:].reset_index() for ds in input_1]
df = pd.concat(df_list)
instrument_data = {
'start_date': df['date'].min().strftime('%Y-%m-%d'),
'end_date': df['date'].max().strftime('%Y-%m-%d'),
'instruments': list(set(df['instrument'])),
}
return Outputs(data=DataSource.write_df(df), instrument_data=DataSource.write_pickle(instrument_data))
def gen_rolling_dates(trading_days_market, start_date, end_date, train_update_days, train_update_days_for_live, train_data_min_days, train_data_max_days, rolling_count_for_live):
tdays = list(D.trading_days(market=trading_days_market, start_date=start_date, end_date=end_date)['date'])
# whether we are running in live/paper-trading mode
is_live_run = T.live_run_param('trading_date', None) is not None
if is_live_run and train_update_days_for_live:
train_update_days = train_update_days_for_live
rollings = []
train_end_date = train_data_min_days
while train_end_date < len(tdays):
if train_data_max_days is not None and train_data_max_days > 0:
train_start_date = max(train_end_date - train_data_max_days, 0)
else:
train_start_date = 0
rollings.append({
'train_start_date': tdays[train_start_date].strftime('%Y-%m-%d'),
'train_end_date': tdays[train_end_date - 1].strftime('%Y-%m-%d'),
'test_start_date': tdays[train_end_date].strftime('%Y-%m-%d'),
'test_end_date': tdays[min(train_end_date + train_update_days, len(tdays)) - 1].strftime('%Y-%m-%d'),
})
train_end_date += train_update_days
if not rollings:
raise Exception('No rolling windows to run; please check the configuration')
if is_live_run and rolling_count_for_live:
rollings = rollings[-rolling_count_for_live:]
return rollings
g = bq_graph
rolling_dates = gen_rolling_dates(
trading_days_market, start_date, end_date, train_update_days, train_update_days_for_live, train_data_min_days, train_data_max_days, rolling_count_for_live)
print('=========:', len(rolling_dates), rolling_dates)
# Train and predict on each rolling window
results = []
for rolling in rolling_dates:
parameters = {}
# Disable the backtest module during the rolling train/predict runs
parameters[trade_mid + '.__enabled__'] = False
parameters[train_instruments_mid + '.start_date'] = rolling['train_start_date']
parameters[train_instruments_mid + '.end_date'] = rolling['train_end_date']
parameters[test_instruments_mid + '.start_date'] = rolling['test_start_date']
parameters[test_instruments_mid + '.end_date'] = rolling['test_end_date']
# print('------ rolling_train:', parameters)
results.append(g.run(parameters))
print('++++++++:', len(results), results)
# Merge the prediction results and run the backtest
mx = M.cached.v3(run=merge_datasources, input_1=[[result[predict_mid].data_1, result[test_instruments_mid].data.read_pickle()['start_date']] for result in results])
parameters = {}
parameters['*.__enabled__'] = False
parameters[trade_mid + '.__enabled__'] = True
parameters[trade_mid + '.instruments'] = mx.instrument_data
parameters[trade_mid + '.options_data'] = mx.data
trade = g.run(parameters)
return {'rollings': results, 'trade': trade}
m23 = M.hyper_rolling_train.v1(
run=m23_run_bigquant_run,
run_now=True,
bq_graph=g
)
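For reference, the rolling-window construction inside gen_rolling_dates can be reproduced outside the platform. Below is a minimal standalone sketch that uses a plain list of date strings in place of D.trading_days and hypothetical parameter values; it mirrors the window arithmetic above without depending on the platform APIs.

import datetime

def make_rolling_dates(tdays, train_update_days, train_data_min_days, train_data_max_days):
    """Carve a trading calendar (list of date strings) into rolling train/test windows."""
    rollings = []
    train_end = train_data_min_days
    while train_end < len(tdays):
        # limit the training window to at most train_data_max_days trading days (0 = no limit)
        train_start = max(train_end - train_data_max_days, 0) if train_data_max_days else 0
        rollings.append({
            'train_start_date': tdays[train_start],
            'train_end_date': tdays[train_end - 1],
            'test_start_date': tdays[train_end],
            'test_end_date': tdays[min(train_end + train_update_days, len(tdays)) - 1],
        })
        train_end += train_update_days
    return rollings

# Hypothetical calendar: one "trading day" per calendar day starting 2014-01-01.
start = datetime.date(2014, 1, 1)
tdays = [(start + datetime.timedelta(days=i)).isoformat() for i in range(750)]
for window in make_rolling_dates(tdays, train_update_days=250, train_data_min_days=250, train_data_max_days=750):
    print(window)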
[2022-08-22 13:29:11.710013] INFO: moduleinvoker: instruments.v2 开始运行..
[2022-08-22 13:29:11.729283] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:11.731359] INFO: moduleinvoker: instruments.v2 运行完成[0.021355s].
[2022-08-22 13:29:11.737560] INFO: moduleinvoker: input_features.v1 开始运行..
[2022-08-22 13:29:11.750446] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:11.752404] INFO: moduleinvoker: input_features.v1 运行完成[0.014841s].
[2022-08-22 13:29:11.758706] INFO: moduleinvoker: instruments.v2 开始运行..
[2022-08-22 13:29:11.815704] INFO: moduleinvoker: instruments.v2 运行完成[0.056985s].
[2022-08-22 13:29:11.826464] INFO: moduleinvoker: advanced_auto_labeler.v2 开始运行..
[2022-08-22 13:29:11.834367] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:11.837166] INFO: moduleinvoker: advanced_auto_labeler.v2 运行完成[0.010701s].
[2022-08-22 13:29:11.857050] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2022-08-22 13:29:11.888082] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:11.891294] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[0.034221s].
[2022-08-22 13:29:11.908580] INFO: moduleinvoker: general_feature_extractor.v7 开始运行..
[2022-08-22 13:29:13.084010] INFO: 基础特征抽取: 年份 2015, 特征行数=6747
[2022-08-22 13:29:14.230982] INFO: 基础特征抽取: 年份 2016, 特征行数=324
[2022-08-22 13:29:14.309961] INFO: 基础特征抽取: 总行数: 7071
[2022-08-22 13:29:14.315384] INFO: moduleinvoker: general_feature_extractor.v7 运行完成[2.406818s].
[2022-08-22 13:29:14.326116] INFO: moduleinvoker: standardlize.v8 开始运行..
[2022-08-22 13:29:14.341411] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:14.343297] INFO: moduleinvoker: standardlize.v8 运行完成[0.017192s].
[2022-08-22 13:29:14.353209] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2022-08-22 13:29:14.363718] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:14.365592] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[0.012384s].
[2022-08-22 13:29:14.381716] INFO: moduleinvoker: derived_feature_extractor.v3 开始运行..
[2022-08-22 13:29:14.507576] INFO: derived_feature_extractor: 提取完成 mean(close_0, 5), 0.010s
[2022-08-22 13:29:14.520922] INFO: derived_feature_extractor: 提取完成 mean(low_0, 5), 0.011s
[2022-08-22 13:29:14.534737] INFO: derived_feature_extractor: 提取完成 mean(open_0, 5), 0.012s
[2022-08-22 13:29:14.545486] INFO: derived_feature_extractor: 提取完成 mean(high_0, 5), 0.009s
[2022-08-22 13:29:14.558335] INFO: derived_feature_extractor: 提取完成 mean(turn_0, 5), 0.011s
[2022-08-22 13:29:14.567571] INFO: derived_feature_extractor: 提取完成 mean(amount_0, 5), 0.008s
[2022-08-22 13:29:14.576587] INFO: derived_feature_extractor: 提取完成 mean(return_0, 5), 0.008s
[2022-08-22 13:29:14.585593] INFO: derived_feature_extractor: 提取完成 ts_max(close_0, 5), 0.008s
[2022-08-22 13:29:14.600604] INFO: derived_feature_extractor: 提取完成 ts_max(low_0, 5), 0.012s
[2022-08-22 13:29:14.610575] INFO: derived_feature_extractor: 提取完成 ts_max(open_0, 5), 0.008s
[2022-08-22 13:29:14.620441] INFO: derived_feature_extractor: 提取完成 ts_max(high_0, 5), 0.008s
[2022-08-22 13:29:14.630022] INFO: derived_feature_extractor: 提取完成 ts_max(turn_0, 5), 0.008s
[2022-08-22 13:29:14.641208] INFO: derived_feature_extractor: 提取完成 ts_max(amount_0, 5), 0.010s
[2022-08-22 13:29:14.650779] INFO: derived_feature_extractor: 提取完成 ts_max(return_0, 5), 0.008s
[2022-08-22 13:29:14.660145] INFO: derived_feature_extractor: 提取完成 ts_min(close_0, 5), 0.008s
[2022-08-22 13:29:14.669648] INFO: derived_feature_extractor: 提取完成 ts_min(low_0, 5), 0.008s
[2022-08-22 13:29:14.679844] INFO: derived_feature_extractor: 提取完成 ts_min(open_0, 5), 0.009s
[2022-08-22 13:29:14.692341] INFO: derived_feature_extractor: 提取完成 ts_min(high_0, 5), 0.011s
[2022-08-22 13:29:14.708096] INFO: derived_feature_extractor: 提取完成 ts_min(turn_0, 5), 0.013s
[2022-08-22 13:29:14.722849] INFO: derived_feature_extractor: 提取完成 ts_min(amount_0, 5), 0.013s
[2022-08-22 13:29:14.734034] INFO: derived_feature_extractor: 提取完成 ts_min(return_0, 5), 0.010s
[2022-08-22 13:29:14.749478] INFO: derived_feature_extractor: 提取完成 std(close_0, 5), 0.013s
[2022-08-22 13:29:14.766169] INFO: derived_feature_extractor: 提取完成 std(low_0, 5), 0.015s
[2022-08-22 13:29:14.784879] INFO: derived_feature_extractor: 提取完成 std(open_0, 5), 0.016s
[2022-08-22 13:29:14.803521] INFO: derived_feature_extractor: 提取完成 std(high_0, 5), 0.016s
[2022-08-22 13:29:14.814559] INFO: derived_feature_extractor: 提取完成 std(turn_0, 5), 0.009s
[2022-08-22 13:29:14.825708] INFO: derived_feature_extractor: 提取完成 std(amount_0, 5), 0.009s
[2022-08-22 13:29:14.837312] INFO: derived_feature_extractor: 提取完成 std(return_0, 5), 0.010s
[2022-08-22 13:29:14.867408] INFO: derived_feature_extractor: 提取完成 ts_rank(close_0, 5), 0.028s
[2022-08-22 13:29:14.900700] INFO: derived_feature_extractor: 提取完成 ts_rank(low_0, 5), 0.032s
[2022-08-22 13:29:14.929166] INFO: derived_feature_extractor: 提取完成 ts_rank(open_0, 5), 0.027s
[2022-08-22 13:29:14.981791] INFO: derived_feature_extractor: 提取完成 ts_rank(high_0, 5), 0.051s
[2022-08-22 13:29:15.012703] INFO: derived_feature_extractor: 提取完成 ts_rank(turn_0, 5), 0.029s
[2022-08-22 13:29:15.039096] INFO: derived_feature_extractor: 提取完成 ts_rank(amount_0, 5), 0.024s
[2022-08-22 13:29:15.064036] INFO: derived_feature_extractor: 提取完成 ts_rank(return_0, 5), 0.023s
[2022-08-22 13:29:15.081923] INFO: derived_feature_extractor: 提取完成 decay_linear(close_0, 5), 0.016s
[2022-08-22 13:29:15.108018] INFO: derived_feature_extractor: 提取完成 decay_linear(low_0, 5), 0.024s
[2022-08-22 13:29:15.138664] INFO: derived_feature_extractor: 提取完成 decay_linear(open_0, 5), 0.028s
[2022-08-22 13:29:15.168522] INFO: derived_feature_extractor: 提取完成 decay_linear(high_0, 5), 0.028s
[2022-08-22 13:29:15.196951] INFO: derived_feature_extractor: 提取完成 decay_linear(turn_0, 5), 0.026s
[2022-08-22 13:29:15.218168] INFO: derived_feature_extractor: 提取完成 decay_linear(amount_0, 5), 0.018s
[2022-08-22 13:29:15.236328] INFO: derived_feature_extractor: 提取完成 decay_linear(return_0, 5), 0.017s
[2022-08-22 13:29:15.459443] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, return_0, 5), 0.222s
[2022-08-22 13:29:15.716120] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, high_0, 5), 0.255s
[2022-08-22 13:29:15.939102] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, low_0, 5), 0.221s
[2022-08-22 13:29:16.143595] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, close_0, 5), 0.203s
[2022-08-22 13:29:16.380094] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, open_0, 5), 0.235s
[2022-08-22 13:29:16.597228] INFO: derived_feature_extractor: 提取完成 correlation(volume_0, turn_0, 5), 0.215s
[2022-08-22 13:29:16.825174] INFO: derived_feature_extractor: 提取完成 correlation(return_0, high_0, 5), 0.226s
[2022-08-22 13:29:17.035334] INFO: derived_feature_extractor: 提取完成 correlation(return_0, low_0, 5), 0.208s
[2022-08-22 13:29:17.275463] INFO: derived_feature_extractor: 提取完成 correlation(return_0, close_0, 5), 0.238s
[2022-08-22 13:29:17.482697] INFO: derived_feature_extractor: 提取完成 correlation(return_0, open_0, 5), 0.206s
[2022-08-22 13:29:17.704750] INFO: derived_feature_extractor: 提取完成 correlation(return_0, turn_0, 5), 0.220s
[2022-08-22 13:29:17.924304] INFO: derived_feature_extractor: 提取完成 correlation(high_0, low_0, 5), 0.218s
[2022-08-22 13:29:18.136733] INFO: derived_feature_extractor: 提取完成 correlation(high_0, close_0, 5), 0.211s
[2022-08-22 13:29:18.366748] INFO: derived_feature_extractor: 提取完成 correlation(high_0, open_0, 5), 0.228s
[2022-08-22 13:29:18.577230] INFO: derived_feature_extractor: 提取完成 correlation(high_0, turn_0, 5), 0.209s
[2022-08-22 13:29:18.825663] INFO: derived_feature_extractor: 提取完成 correlation(low_0, close_0, 5), 0.246s
[2022-08-22 13:29:19.053036] INFO: derived_feature_extractor: 提取完成 correlation(low_0, open_0, 5), 0.225s
[2022-08-22 13:29:19.306172] INFO: derived_feature_extractor: 提取完成 correlation(low_0, turn_0, 5), 0.252s
[2022-08-22 13:29:19.505247] INFO: derived_feature_extractor: 提取完成 correlation(close_0, open_0, 5), 0.197s
[2022-08-22 13:29:19.697879] INFO: derived_feature_extractor: 提取完成 correlation(close_0, turn_0, 5), 0.191s
[2022-08-22 13:29:19.907635] INFO: derived_feature_extractor: 提取完成 correlation(open_0, turn_0, 5), 0.208s
[2022-08-22 13:29:19.991046] INFO: derived_feature_extractor: /y_2015, 6747
[2022-08-22 13:29:20.105550] INFO: derived_feature_extractor: /y_2016, 324
[2022-08-22 13:29:20.201909] INFO: moduleinvoker: derived_feature_extractor.v3 运行完成[5.820185s].
[2022-08-22 13:29:20.208847] INFO: moduleinvoker: standardlize.v8 开始运行..
[2022-08-22 13:29:20.218993] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:20.220889] INFO: moduleinvoker: standardlize.v8 运行完成[0.012067s].
[2022-08-22 13:29:20.227225] INFO: moduleinvoker: standardlize.v8 开始运行..
[2022-08-22 13:29:35.613318] INFO: moduleinvoker: standardlize.v8 运行完成[15.386082s].
[2022-08-22 13:29:35.628141] INFO: moduleinvoker: fillnan.v1 开始运行..
[2022-08-22 13:29:35.634575] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:35.636010] INFO: moduleinvoker: fillnan.v1 运行完成[0.007876s].
[2022-08-22 13:29:35.644737] INFO: moduleinvoker: fillnan.v1 开始运行..
[2022-08-22 13:29:35.916766] INFO: moduleinvoker: fillnan.v1 运行完成[0.271999s].
[2022-08-22 13:29:35.927770] INFO: moduleinvoker: join.v3 开始运行..
[2022-08-22 13:29:35.934203] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:35.935853] INFO: moduleinvoker: join.v3 运行完成[0.00809s].
[2022-08-22 13:29:35.958103] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2022-08-22 13:29:36.107343] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.149206s].
[2022-08-22 13:29:36.136230] INFO: moduleinvoker: dl_convert_to_bin.v2 开始运行..
[2022-08-22 13:29:36.144513] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:36.146055] INFO: moduleinvoker: dl_convert_to_bin.v2 运行完成[0.009849s].
[2022-08-22 13:29:36.160212] INFO: moduleinvoker: cached.v3 开始运行..
[2022-08-22 13:29:36.182364] INFO: moduleinvoker: 命中缓存
[2022-08-22 13:29:36.184350] INFO: moduleinvoker: cached.v3 运行完成[0.024157s].
[2022-08-22 13:29:36.201810] INFO: moduleinvoker: dl_models_tabnet_train.v1 开始运行..
[2022-08-22 13:29:38.389380] INFO: dl_models_tabnet_train: 准备训练,训练样本个数:424587,迭代次数:30
[2022-08-22 13:30:31.108789] ERROR: moduleinvoker: module name: dl_models_tabnet_train, module version: v1, trackeback: RuntimeError: DataLoader worker (pid 18459) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit.
[2022-08-22 13:30:31.117715] ERROR: moduleinvoker: module name: hyper_rolling_train, module version: v1, trackeback: RuntimeError: DataLoader worker (pid 18459) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit.
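The run ultimately fails inside dl_models_tabnet_train: the PyTorch DataLoader workers exhaust the environment's shared memory (Bus error), as the final traceback notes. Possible mitigations, none verified against this specific platform, are to raise the runtime's shared-memory limit, reduce m18.batch_size, or lower m18.num_workers. The last two can be expressed with the same graph-parameter override mechanism already used in the rolling loop of m23_run_bigquant_run, for example:

# Hypothetical additions to the per-rolling parameters dict:
parameters['m18.num_workers'] = 0     # load batches in the main process, avoiding DataLoader worker shared memory
parameters['m18.batch_size'] = 10240  # or use smaller batches, which may also reduce shared-memory pressure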