
Research on the Application of Transformer to Quantitative Stock Selection

Transformer: Attention Is All You Need

paper: https://arxiv.org/abs/1706.03762

The naive transformer implemented here for financial time series prediction follows the paper "Attention is all you need":

Given an input of shape (N, T, F), the model consists of:

  1. An embedding layer that maps the input (N, T, F) to a representation (N, T, F');
  2. A positional encoding layer that adds sinusoidal positional encodings (see the formulas after this list);
  3. An encoder that consists of several encoder layers, each of which uses self-attention (a function of query, key, and value) as its computing module;
  4. A decoder that consists of an MLP (or a Linear layer) that maps the representation of the last time step (N, 1, F') to the output (N, 1).
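
For reference, the sinusoidal positional encoding from the paper (implemented by the PositionalEncoding module below) adds, at position pos and embedding index i:

$$\mathrm{PE}_{(pos,\,2i)} = \sin\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right), \qquad \mathrm{PE}_{(pos,\,2i+1)} = \cos\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right)$$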
In [1]:
import math

import numpy as np
import torch
import torch.nn as nn

from bigmodels.models.base import BaseModel
from bigmodels.models.transformer import Transformer
from bigmodels.schedule import get_cosine_schedule_with_warmup
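
The get_cosine_schedule_with_warmup helper imported above is stepped once per batch in train_epoch below, because Transformers are sensitive to the learning rate early in training. As a rough sketch of the behaviour (assuming the bigmodels helper follows the same formula as the widely used Hugging Face helper of the same name; this is an assumption, not something stated here), the learning-rate multiplier ramps up linearly during warmup and then decays along a half cosine:

import math

def cosine_warmup_multiplier(step, num_warmup_steps=4000, num_training_steps=100000):
    # sketch only: the multiplier applied to the base learning rate at a given optimizer step
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)  # linear warmup: 0 -> 1
    progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))  # cosine decay: 1 -> 0

# with the lr=1e-3 used later: step 2000 -> ~5e-4, step 4000 -> 1e-3, step 52000 -> ~5e-4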
In [2]:
class Transformer(BaseModel):
    """ Transformer: Attention is all you need
    paper: https://arxiv.org/abs/1706.03762

    Args:
        input_dim: 输入特征的数量
        output_dim: 输出特征的数量
        max_seq: 训练序列的最大窗口数
        enbed_dim: Transformer的d_model
        nhead: 多头的数量,默认8
        num_layers: Transformer中Encoder的层数,默认为4
        dropout: 默认为0.1
    """

    def __init__(self,
                 input_dim=98,
                 output_dim=1,
                 max_seq=5,
                 embed_dim=128,
                 nhead=8,
                 num_layers=4,
                 dropout=0.1):
        super(Transformer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.feature_layer = nn.Linear(input_dim, embed_dim)
        # self.feature_pos = nn.Embedding(max_seq, embed_dim)  # learned positional embedding (unused)
        # positional information is added by the sinusoidal PositionalEncoding module below
        self.pos_encoder = PositionalEncoding(embed_dim)

        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=nhead, dropout=dropout)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        self.avgpool1d = nn.AdaptiveAvgPool1d(1)

        # TODO: replace the decoder with an FFN
        self.out = nn.Linear(embed_dim, output_dim)
        self._reset_parameters()

    def _reset_parameters(self):
        r"""Initiate parameters in the model."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, x):
        x = self.feature_layer(x)

        # x = [bs, window, feature] -> [window, bs, feature]
        x = x.permute(1, 0, 2)

        x = self.pos_encoder(x)

        src_mask = None
        x = self.encoder(x, src_mask)
        x = x.permute(1, 0, 2)

        # x = self.avgpool1d(x)
        # x = x.squeeze(-1)
        x = x[:, -1, :]  # take the representation of the last time step

        x = self.out(x)
        return x.squeeze()
    
    def train_epoch(self, dataloader):
        model = self.train()
        optimizer = self.optimizer
        criterion = self.criterion

        losses = []
        for item in dataloader:
            feature = item[0].float().to(self.device)
            label = item[1].float().to(self.device)

            optimizer.zero_grad()
            output = model(feature)
            loss = criterion(output, label)

            loss.backward()
            # clip gradients
            nn.utils.clip_grad_value_(self.parameters(), 3.0)
            optimizer.step()
            for scheduler in self.schedulers:
                # the Transformer needs learning-rate warmup
                scheduler.step()

            losses.append(loss.item())

        return np.mean(losses)


class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=1000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer("pe", pe)

    def forward(self, x):
        # [T, N, F]
        return x + self.pe[: x.size(0), :]
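
As a quick sanity check of the tensor shapes (a minimal sketch; the batch size of 256 is arbitrary, and it assumes BaseModel requires no extra constructor arguments):

# hypothetical smoke test: 256 samples, a 5-day window, 98 features each
model = Transformer(input_dim=98, max_seq=5, embed_dim=128, nhead=8, num_layers=2)
x = torch.randn(256, 5, 98)   # (N, T, F)
y = model(x)                  # shape (256,): one predicted label per sample
print(y.shape)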

    {"description":"实验创建于2017/8/26","graph":{"edges":[{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8:data"},{"to_node_id":"-106:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8:data"},{"to_node_id":"-38256:input_1","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15:data"},{"to_node_id":"-106:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-113:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-122:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-129:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-251:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-266:input_2","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-288:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-293:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-298:input_2","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-25911:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-25911:input_data","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data"},{"to_node_id":"-122:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62:data"},{"to_node_id":"-141:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62:data"},{"to_node_id":"-113:input_data","from_node_id":"-106:data"},{"to_node_id":"-266:input_1","from_node_id":"-113:data"},{"to_node_id":"-129:input_data","from_node_id":"-122:data"},{"to_node_id":"-298:input_1","from_node_id":"-129:data"},{"to_node_id":"-2431:input_2","from_node_id":"-129:data"},{"to_node_id":"-436:input_2","from_node_id":"-251:data"},{"to_node_id":"-2431:input_1","from_node_id":"-436:data_1"},{"to_node_id":"-288:input_data","from_node_id":"-266:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data2","from_node_id":"-288:data"},{"to_node_id":"-251:input_data","from_node_id":"-293:data"},{"to_node_id":"-293:input_data","from_node_id":"-298:data"},{"to_node_id":"-141:options_data","from_node_id":"-2431:data_1"},{"to_node_id":"-141:benchmark_ds","from_node_id":"-2431:data_3"},{"to_node_id":"-436:input_1","from_node_id":"-25911:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data1","from_node_id":"-38256:data"}],"nodes":[{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8","module_id":"BigQuantSpace.instruments.instruments-v2","parameters":[{"name":"start_date","value":"2011-01-01","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"2013-12-31","type":"Literal","bound_global_parameter":null},{"name":"market","value":"CN_STOCK_A","type":"Literal","bound_global_parameter":null},{"name":"instrument_list","value":"","type":"Literal","bound_global_parameter":null},{"name":"max_count","value":"0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"rolling_conf","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8"}],"cacheable":true,"seq_num":22,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15","module_id":"BigQuantSpace.advanced_auto_labeler.advanced_auto_labeler-v2","parameters":[{"name":"label_expr","value":"# #号开始的表示注释\n# 0. 
每行一个,顺序执行,从第二个开始,可以使用label字段\n# 1. 可用数据字段见 https://bigquant.com/docs/data_history_data.html\n# 添加benchmark_前缀,可使用对应的benchmark数据\n# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/big_expr.html>`_\n\n# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)\nshift(close, -5) / shift(open, -1)-1\n\n# 极值处理:用1%和99%分位的值做clip\nclip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))\n\n# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)\nwhere(shift(high, -1) == shift(low, -1), NaN, label)\n","type":"Literal","bound_global_parameter":null},{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"benchmark","value":"000300.SHA","type":"Literal","bound_global_parameter":null},{"name":"drop_na_label","value":"True","type":"Literal","bound_global_parameter":null},{"name":"cast_label_int","value":"False","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15"}],"cacheable":true,"seq_num":23,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24","module_id":"BigQuantSpace.input_features.input_features-v1","parameters":[{"name":"features","value":"close_0\nopen_0\nhigh_0\nlow_0 \namount_0\nturn_0 \nreturn_0\n \nclose_1\nopen_1\nhigh_1\nlow_1\nreturn_1\namount_1\nturn_1\n \nclose_2\nopen_2\nhigh_2\nlow_2\namount_2\nturn_2\nreturn_2\n \nclose_3\nopen_3\nhigh_3\nlow_3\namount_3\nturn_3\nreturn_3\n \nclose_4\nopen_4\nhigh_4\nlow_4\namount_4\nturn_4\nreturn_4\n \nmean(close_0, 5)\nmean(low_0, 5)\nmean(open_0, 5)\nmean(high_0, 5)\nmean(turn_0, 5)\nmean(amount_0, 5)\nmean(return_0, 5)\n \nts_max(close_0, 5)\nts_max(low_0, 5)\nts_max(open_0, 5)\nts_max(high_0, 5)\nts_max(turn_0, 5)\nts_max(amount_0, 5)\nts_max(return_0, 5)\n \nts_min(close_0, 5)\nts_min(low_0, 5)\nts_min(open_0, 5)\nts_min(high_0, 5)\nts_min(turn_0, 5)\nts_min(amount_0, 5)\nts_min(return_0, 5) \n \nstd(close_0, 5)\nstd(low_0, 5)\nstd(open_0, 5)\nstd(high_0, 5)\nstd(turn_0, 5)\nstd(amount_0, 5)\nstd(return_0, 5)\n \nts_rank(close_0, 5)\nts_rank(low_0, 5)\nts_rank(open_0, 5)\nts_rank(high_0, 5)\nts_rank(turn_0, 5)\nts_rank(amount_0, 5)\nts_rank(return_0, 5)\n \ndecay_linear(close_0, 5)\ndecay_linear(low_0, 5)\ndecay_linear(open_0, 5)\ndecay_linear(high_0, 5)\ndecay_linear(turn_0, 5)\ndecay_linear(amount_0, 5)\ndecay_linear(return_0, 5)\n \ncorrelation(volume_0, return_0, 5)\ncorrelation(volume_0, high_0, 5)\ncorrelation(volume_0, low_0, 5)\ncorrelation(volume_0, close_0, 5)\ncorrelation(volume_0, open_0, 5)\ncorrelation(volume_0, turn_0, 5)\n \ncorrelation(return_0, high_0, 5)\ncorrelation(return_0, low_0, 5)\ncorrelation(return_0, close_0, 5)\ncorrelation(return_0, open_0, 5)\ncorrelation(return_0, turn_0, 5)\n \ncorrelation(high_0, low_0, 5)\ncorrelation(high_0, close_0, 5)\ncorrelation(high_0, open_0, 5)\ncorrelation(high_0, turn_0, 5)\n \ncorrelation(low_0, close_0, 5)\ncorrelation(low_0, open_0, 5)\ncorrelation(low_0, turn_0, 5)\n \ncorrelation(close_0, open_0, 5)\ncorrelation(close_0, turn_0, 5)\n\ncorrelation(open_0, turn_0, 
5)","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"features_ds","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24"}],"cacheable":true,"seq_num":24,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53","module_id":"BigQuantSpace.join.join-v3","parameters":[{"name":"on","value":"date,instrument","type":"Literal","bound_global_parameter":null},{"name":"how","value":"inner","type":"Literal","bound_global_parameter":null},{"name":"sort","value":"True","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"data1","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"},{"name":"data2","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"}],"cacheable":true,"seq_num":25,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62","module_id":"BigQuantSpace.instruments.instruments-v2","parameters":[{"name":"start_date","value":"2014-01-01","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"2014-12-31","type":"Literal","bound_global_parameter":null},{"name":"market","value":"CN_STOCK_A","type":"Literal","bound_global_parameter":null},{"name":"instrument_list","value":"","type":"Literal","bound_global_parameter":null},{"name":"max_count","value":"0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"rolling_conf","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62"}],"cacheable":true,"seq_num":26,"comment":"预测数据,用于回测和模拟","comment_collapsed":true},{"node_id":"-106","module_id":"BigQuantSpace.general_feature_extractor.general_feature_extractor-v7","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"before_start_days","value":"10","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-106"},{"name":"features","node_id":"-106"}],"output_ports":[{"name":"data","node_id":"-106"}],"cacheable":true,"seq_num":27,"comment":"","comment_collapsed":true},{"node_id":"-113","module_id":"BigQuantSpace.derived_feature_extractor.derived_feature_extractor-v3","parameters":[{"name":"date_col","value":"date","type":"Literal","bound_global_parameter":null},{"name":"instrument_col","value":"instrument","type":"Literal","bound_global_parameter":null},{"name":"drop_na","value":"True","type":"Literal","bound_global_parameter":null},{"name":"remove_extra_columns","value":"False","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-113"},{"name":"features","node_id":"-113"}],"output_ports":[{"name":"data","node_id":"-113"}],"cacheable":true,"seq_num":28,"comment":"","comment_collapsed":true},{"node_id":"-122","module_id":"BigQuantSpace.general_feature_extractor.general_feature_extractor-v7","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"before_start_days","value":"10","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-122"},{"name":"features","node_id":"-
122"}],"output_ports":[{"name":"data","node_id":"-122"}],"cacheable":true,"seq_num":29,"comment":"","comment_collapsed":true},{"node_id":"-129","module_id":"BigQuantSpace.derived_feature_extractor.derived_feature_extractor-v3","parameters":[{"name":"date_col","value":"date","type":"Literal","bound_global_parameter":null},{"name":"instrument_col","value":"instrument","type":"Literal","bound_global_parameter":null},{"name":"drop_na","value":"True","type":"Literal","bound_global_parameter":null},{"name":"remove_extra_columns","value":"False","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-129"},{"name":"features","node_id":"-129"}],"output_ports":[{"name":"data","node_id":"-129"}],"cacheable":true,"seq_num":30,"comment":"","comment_collapsed":true},{"node_id":"-251","module_id":"BigQuantSpace.dl_convert_to_bin.dl_convert_to_bin-v2","parameters":[{"name":"window_size","value":"5","type":"Literal","bound_global_parameter":null},{"name":"feature_clip","value":"3","type":"Literal","bound_global_parameter":null},{"name":"flatten","value":"False","type":"Literal","bound_global_parameter":null},{"name":"window_along_col","value":"instrument","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-251"},{"name":"features","node_id":"-251"}],"output_ports":[{"name":"data","node_id":"-251"}],"cacheable":true,"seq_num":32,"comment":"","comment_collapsed":true},{"node_id":"-436","module_id":"BigQuantSpace.cached.cached-v3","parameters":[{"name":"run","value":"# Python 代码入口函数,input_1/2/3 对应三个输入端,data_1/2/3 对应三个输出端\ndef bigquant_run(input_1, input_2, input_3, embed_dim, nhead, num_layers, dropout):\n # 示例代码如下。在这里编写您的代码\n from sklearn.model_selection import train_test_split\n # train data\n train_data = input_1.read()\n x_train, x_val, y_train, y_val = train_test_split(train_data[\"x\"], train_data['y'], test_size=0.1)\n # val data\n test_data = input_2.read()\n x_test = test_data[\"x\"]\n \n model = Transformer(input_dim=98, embed_dim=embed_dim, nhead=nhead, num_layers=num_layers, dropout=dropout)\n opt = torch.optim.Adam(model.parameters(), lr=1e-3)\n # transformer need warmup\n scheduler = get_cosine_schedule_with_warmup(optimizer=opt, num_warmup_steps=4000, num_training_steps=100000)\n loss = nn.MSELoss()\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n model.compile(optimizer=opt, loss=loss, device=device)\n model.fit(x_train, \n y_train, \n validation_data=(x_val, y_val), \n batch_size=256, \n epochs=10, \n verbose=1,\n schedulers=[scheduler],\n num_workers=0)\n \n output = model.predict(x_test)\n \n data_1 = DataSource.write_pickle(output)\n return Outputs(data_1=data_1, data_2=None, data_3=None)","type":"Literal","bound_global_parameter":null},{"name":"post_run","value":"# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。\ndef bigquant_run(outputs):\n return outputs\n","type":"Literal","bound_global_parameter":null},{"name":"input_ports","value":"","type":"Literal","bound_global_parameter":null},{"name":"params","value":"{\n \"embed_dim\": 128,\n \"nhead\": 8,\n \"num_layers\": 2,\n \"dropout\": 
0.3\n}","type":"Literal","bound_global_parameter":null},{"name":"output_ports","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_1","node_id":"-436"},{"name":"input_2","node_id":"-436"},{"name":"input_3","node_id":"-436"}],"output_ports":[{"name":"data_1","node_id":"-436"},{"name":"data_2","node_id":"-436"},{"name":"data_3","node_id":"-436"}],"cacheable":true,"seq_num":33,"comment":"Transformer训练和预测","comment_collapsed":false},{"node_id":"-266","module_id":"BigQuantSpace.standardlize.standardlize-v8","parameters":[{"name":"columns_input","value":"[]","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_1","node_id":"-266"},{"name":"input_2","node_id":"-266"}],"output_ports":[{"name":"data","node_id":"-266"}],"cacheable":true,"seq_num":34,"comment":"","comment_collapsed":true},{"node_id":"-288","module_id":"BigQuantSpace.fillnan.fillnan-v1","parameters":[{"name":"fill_value","value":"0.0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-288"},{"name":"features","node_id":"-288"}],"output_ports":[{"name":"data","node_id":"-288"}],"cacheable":true,"seq_num":35,"comment":"","comment_collapsed":true},{"node_id":"-293","module_id":"BigQuantSpace.fillnan.fillnan-v1","parameters":[{"name":"fill_value","value":"0.0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-293"},{"name":"features","node_id":"-293"}],"output_ports":[{"name":"data","node_id":"-293"}],"cacheable":true,"seq_num":36,"comment":"","comment_collapsed":true},{"node_id":"-298","module_id":"BigQuantSpace.standardlize.standardlize-v8","parameters":[{"name":"columns_input","value":"[]","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_1","node_id":"-298"},{"name":"input_2","node_id":"-298"}],"output_ports":[{"name":"data","node_id":"-298"}],"cacheable":true,"seq_num":37,"comment":"","comment_collapsed":true},{"node_id":"-2431","module_id":"BigQuantSpace.cached.cached-v3","parameters":[{"name":"run","value":"# Python 代码入口函数,input_1/2/3 对应三个输入端,data_1/2/3 对应三个输出端\ndef bigquant_run(input_1, input_2, input_3):\n # 示例代码如下。在这里编写您的代码\n pred_label = input_1.read_pickle()\n \n df = input_2.read_df()\n df = pd.DataFrame({'pred_label':pred_label[:], 'instrument':df.instrument, 'date':df.date})\n df.sort_values(['date','pred_label'],inplace=True, ascending=[True,False])\n return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)\n","type":"Literal","bound_global_parameter":null},{"name":"post_run","value":"# 后处理函数,可选。输入是主函数的输出,可以在这里对数据做处理,或者返回更友好的outputs数据格式。此函数输出不会被缓存。\ndef bigquant_run(outputs):\n return 
outputs\n","type":"Literal","bound_global_parameter":null},{"name":"input_ports","value":"","type":"Literal","bound_global_parameter":null},{"name":"params","value":"{}","type":"Literal","bound_global_parameter":null},{"name":"output_ports","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_1","node_id":"-2431"},{"name":"input_2","node_id":"-2431"},{"name":"input_3","node_id":"-2431"}],"output_ports":[{"name":"data_1","node_id":"-2431"},{"name":"data_2","node_id":"-2431"},{"name":"data_3","node_id":"-2431"}],"cacheable":true,"seq_num":41,"comment":"","comment_collapsed":true},{"node_id":"-141","module_id":"BigQuantSpace.trade.trade-v4","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"initialize","value":"# 回测引擎:初始化函数,只执行一次\ndef bigquant_run(context):\n # 加载预测数据\n context.ranker_prediction = context.options['data'].read_df()\n\n # 系统已经设置了默认的交易手续费和滑点,要修改手续费可使用如下函数\n context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))\n # 预测数据,通过options传入进来,使用 read_df 函数,加载到内存 (DataFrame)\n # 设置买入的股票数量,这里买入预测股票列表排名靠前的5只\n stock_count = 50\n # 每只的股票的权重,如下的权重分配会使得靠前的股票分配多一点的资金,[0.339160, 0.213986, 0.169580, ..]\n context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])\n # 设置每只股票占用的最大资金比例\n context.max_cash_per_instrument = 0.2\n context.options['hold_days'] = 5\n","type":"Literal","bound_global_parameter":null},{"name":"handle_data","value":"# 回测引擎:每日数据处理函数,每天执行一次\ndef bigquant_run(context, data):\n # 按日期过滤得到今日的预测数据\n ranker_prediction = context.ranker_prediction[\n context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]\n\n # 1. 资金分配\n # 平均持仓时间是hold_days,每日都将买入股票,每日预期使用 1/hold_days 的资金\n # 实际操作中,会存在一定的买入误差,所以在前hold_days天,等量使用资金;之后,尽量使用剩余资金(这里设置最多用等量的1.5倍)\n is_staging = context.trading_day_index < context.options['hold_days'] # 是否在建仓期间(前 hold_days 天)\n cash_avg = context.portfolio.portfolio_value / context.options['hold_days']\n cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)\n cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)\n positions = {e.symbol: p.amount * p.last_sale_price\n for e, p in context.perf_tracker.position_tracker.positions.items()}\n\n # 2. 生成卖出订单:hold_days天之后才开始卖出;对持仓的股票,按机器学习算法预测的排序末位淘汰\n if not is_staging and cash_for_sell > 0:\n equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}\n instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(\n lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))\n # print('rank order for sell %s' % instruments)\n for instrument in instruments:\n context.order_target(context.symbol(instrument), 0)\n cash_for_sell -= positions[instrument]\n if cash_for_sell <= 0:\n break\n\n # 3. 
生成买入订单:按机器学习算法预测的排序,买入前面的stock_count只股票\n buy_cash_weights = context.stock_weights\n buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])\n max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument\n for i, instrument in enumerate(buy_instruments):\n cash = cash_for_buy * buy_cash_weights[i]\n if cash > max_cash_per_instrument - positions.get(instrument, 0):\n # 确保股票持仓量不会超过每次股票最大的占用资金量\n cash = max_cash_per_instrument - positions.get(instrument, 0)\n if cash > 0:\n context.order_value(context.symbol(instrument), cash)\n","type":"Literal","bound_global_parameter":null},{"name":"prepare","value":"# 回测引擎:准备数据,只执行一次\ndef bigquant_run(context):\n pass\n","type":"Literal","bound_global_parameter":null},{"name":"before_trading_start","value":"","type":"Literal","bound_global_parameter":null},{"name":"volume_limit","value":0.025,"type":"Literal","bound_global_parameter":null},{"name":"order_price_field_buy","value":"open","type":"Literal","bound_global_parameter":null},{"name":"order_price_field_sell","value":"close","type":"Literal","bound_global_parameter":null},{"name":"capital_base","value":1000000,"type":"Literal","bound_global_parameter":null},{"name":"auto_cancel_non_tradable_orders","value":"True","type":"Literal","bound_global_parameter":null},{"name":"data_frequency","value":"daily","type":"Literal","bound_global_parameter":null},{"name":"price_type","value":"后复权","type":"Literal","bound_global_parameter":null},{"name":"product_type","value":"股票","type":"Literal","bound_global_parameter":null},{"name":"plot_charts","value":"True","type":"Literal","bound_global_parameter":null},{"name":"backtest_only","value":"False","type":"Literal","bound_global_parameter":null},{"name":"benchmark","value":"000300.SHA","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-141"},{"name":"options_data","node_id":"-141"},{"name":"history_ds","node_id":"-141"},{"name":"benchmark_ds","node_id":"-141"},{"name":"trading_calendar","node_id":"-141"}],"output_ports":[{"name":"raw_perf","node_id":"-141"}],"cacheable":false,"seq_num":42,"comment":"","comment_collapsed":true},{"node_id":"-25911","module_id":"BigQuantSpace.dl_convert_to_bin.dl_convert_to_bin-v2","parameters":[{"name":"window_size","value":"5","type":"Literal","bound_global_parameter":null},{"name":"feature_clip","value":"3","type":"Literal","bound_global_parameter":null},{"name":"flatten","value":"False","type":"Literal","bound_global_parameter":null},{"name":"window_along_col","value":"instrument","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-25911"},{"name":"features","node_id":"-25911"}],"output_ports":[{"name":"data","node_id":"-25911"}],"cacheable":true,"seq_num":2,"comment":"","comment_collapsed":true},{"node_id":"-38256","module_id":"BigQuantSpace.standardlize.standardlize-v8","parameters":[{"name":"columns_input","value":"label","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_1","node_id":"-38256"},{"name":"input_2","node_id":"-38256"}],"output_ports":[{"name":"data","node_id":"-38256"}],"cacheable":true,"seq_num":1,"comment":"","comment_collapsed":true},{"node_id":"-156","module_id":"BigQuantSpace.hyper_parameter_search.hyper_parameter_search-v1","parameters":[{"name":"param_grid_builder","value":"def bigquant_run():\n param_grid = {}\n\n # 在这里设置需要调优的参数备选\n # param_grid['m3.features'] = ['close_1/close_0', 'close_2/close_0\\nclose_3/close_0']\n 
param_grid['m33.params'] = [\n \"\"\"{\"embed_dim\": 64, \"nhead\": 8, \"num_layers\": 2, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 128, \"nhead\": 8, \"num_layers\": 2, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 256, \"nhead\": 8, \"num_layers\": 2, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 64, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 128, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 256, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.1}\"\"\",\n \"\"\"{\"embed_dim\": 64, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.3}\"\"\",\n \"\"\"{\"embed_dim\": 128, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.3}\"\"\",\n \"\"\"{\"embed_dim\": 256, \"nhead\": 8, \"num_layers\": 4, \"dropout\": 0.3}\"\"\"\n ]\n \n return param_grid\n","type":"Literal","bound_global_parameter":null},{"name":"scoring","value":"def bigquant_run(result):\n score = result.get('m19').read_raw_perf()['sharpe'].tail(1)[0]\n\n return {'score': score}\n","type":"Literal","bound_global_parameter":null},{"name":"search_algorithm","value":"网格搜索","type":"Literal","bound_global_parameter":null},{"name":"search_iterations","value":10,"type":"Literal","bound_global_parameter":null},{"name":"random_state","value":"","type":"Literal","bound_global_parameter":null},{"name":"workers","value":1,"type":"Literal","bound_global_parameter":null},{"name":"worker_distributed_run","value":"False","type":"Literal","bound_global_parameter":null},{"name":"worker_silent","value":"False","type":"Literal","bound_global_parameter":null},{"name":"run_now","value":"True","type":"Literal","bound_global_parameter":null},{"name":"bq_graph","value":"True","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"bq_graph_port","node_id":"-156"},{"name":"input_1","node_id":"-156"},{"name":"input_2","node_id":"-156"},{"name":"input_3","node_id":"-156"}],"output_ports":[{"name":"result","node_id":"-156"}],"cacheable":false,"seq_num":4,"comment":"","comment_collapsed":true}],"node_layout":"<node_postions><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-8' Position='324,-13,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-15' Position='21,167,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-24' Position='795,-113,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-53' Position='275,445,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-62' Position='1231,36,200,200'/><node_position Node='-106' Position='436,108,200,200'/><node_position Node='-113' Position='440,178,200,200'/><node_position Node='-122' Position='1246,144,200,200'/><node_position Node='-129' Position='1235,230,200,200'/><node_position Node='-251' Position='1228,527,200,200'/><node_position Node='-436' Position='589,673,200,200'/><node_position Node='-266' Position='444,246,200,200'/><node_position Node='-288' Position='433,319,200,200'/><node_position Node='-293' Position='1244,436,200,200'/><node_position Node='-298' Position='1237,335,200,200'/><node_position Node='-2431' Position='592.5690307617188,821.2050170898438,200,200'/><node_position Node='-141' Position='525,968,200,200'/><node_position Node='-25911' Position='281,561,200,200'/><node_position Node='-38256' Position='16,264,200,200'/><node_position Node='-156' Position='51,636,200,200'/></node_postions>"},"nodes_readonly":false,"studio_version":"v2"}
    In [3]:
    # This code was auto-generated by the visual strategy environment on 2022-05-06 11:40
    # This code cell can only be edited in visual mode. You can also copy the code, paste it into a new code cell or strategy, and then modify it.
    
    
    # Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
    def m33_run_bigquant_run(input_1, input_2, input_3, embed_dim, nhead, num_layers, dropout):
        # Sample code follows; write your own code here
        from sklearn.model_selection import train_test_split
        # train data
        train_data = input_1.read()
        x_train, x_val, y_train, y_val = train_test_split(train_data["x"], train_data['y'], test_size=0.1)
        # test data (used for prediction)
        test_data = input_2.read()
        x_test = test_data["x"]
        
        model = Transformer(input_dim=98, embed_dim=embed_dim, nhead=nhead, num_layers=num_layers, dropout=dropout)
        opt = torch.optim.Adam(model.parameters(), lr=1e-3)
        # the Transformer needs learning-rate warmup
        scheduler = get_cosine_schedule_with_warmup(optimizer=opt, num_warmup_steps=4000, num_training_steps=100000)
        loss = nn.MSELoss()
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
        model.compile(optimizer=opt, loss=loss, device=device)
        model.fit(x_train, 
                  y_train, 
                  validation_data=(x_val, y_val), 
                  batch_size=256, 
                  epochs=10, 
                  verbose=1,
                  schedulers=[scheduler],
                  num_workers=0)
        
        output = model.predict(x_test)
        
        data_1 = DataSource.write_pickle(output)
        return Outputs(data_1=data_1, data_2=None, data_3=None)
    # Post-processing function (optional). Its input is the output of the main function; you can process the data here or return a friendlier outputs format. The output of this function is not cached.
    def m33_post_run_bigquant_run(outputs):
        return outputs
    
    # Python entry function: input_1/2/3 correspond to the three input ports, data_1/2/3 to the three output ports
    def m41_run_bigquant_run(input_1, input_2, input_3):
        # Sample code follows; write your own code here
        pred_label = input_1.read_pickle()
        
        df = input_2.read_df()
        df = pd.DataFrame({'pred_label':pred_label[:], 'instrument':df.instrument, 'date':df.date})
        df.sort_values(['date','pred_label'],inplace=True, ascending=[True,False])
        return Outputs(data_1=DataSource.write_df(df), data_2=None, data_3=None)
    
    # Post-processing function (optional). Its input is the output of the main function; you can process the data here or return a friendlier outputs format. The output of this function is not cached.
    def m41_post_run_bigquant_run(outputs):
        return outputs
    
    # Backtest engine: initialization function, executed only once
    def m42_initialize_bigquant_run(context):
        # load the prediction data
        context.ranker_prediction = context.options['data'].read_df()

        # The system already sets default trading commissions and slippage; use the call below to change the commission
        context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
        # The prediction data is passed in via options and loaded into memory (DataFrame) with read_df above
        # Number of stocks to buy: here we buy the top 50 stocks in the ranked prediction list
        stock_count = 50
        # Per-stock weights; this allocation gives higher-ranked stocks a bit more capital, e.g. [0.339160, 0.213986, 0.169580, ...]
        context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
        # Maximum fraction of capital that a single stock may occupy
        context.max_cash_per_instrument = 0.2
        context.options['hold_days'] = 5
    
    # Backtest engine: daily data-handling function, executed once per trading day
    def m42_handle_data_bigquant_run(context, data):
        # filter the prediction data by date to get today's predictions
        ranker_prediction = context.ranker_prediction[
            context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]
    
        # 1. Capital allocation
        # The average holding period is hold_days and stocks are bought every day, so each day is expected to use 1/hold_days of the capital
        # In practice there is some buying error, so during the first hold_days days we spend equal amounts; afterwards we spend as much of the remaining cash as possible (capped here at 1.5x the equal amount)
        is_staging = context.trading_day_index < context.options['hold_days'] # still building the initial position (first hold_days days)?
        cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
        cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
        cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
        positions = {e.symbol: p.amount * p.last_sale_price
                     for e, p in context.perf_tracker.position_tracker.positions.items()}
    
        # 2. Generate sell orders: selling starts only after hold_days days; held stocks ranked lowest by the model's prediction are eliminated first
        if not is_staging and cash_for_sell > 0:
            equities = {e.symbol: e for e, p in context.perf_tracker.position_tracker.positions.items()}
            instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
                    lambda x: x in equities and not context.has_unfinished_sell_order(equities[x]))])))
            # print('rank order for sell %s' % instruments)
            for instrument in instruments:
                context.order_target(context.symbol(instrument), 0)
                cash_for_sell -= positions[instrument]
                if cash_for_sell <= 0:
                    break
    
        # 3. Generate buy orders: buy the top stock_count stocks in the model's predicted ranking
        buy_cash_weights = context.stock_weights
        buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
        max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
        for i, instrument in enumerate(buy_instruments):
            cash = cash_for_buy * buy_cash_weights[i]
            if cash > max_cash_per_instrument - positions.get(instrument, 0):
                # make sure a single position never exceeds the per-stock capital cap
                cash = max_cash_per_instrument - positions.get(instrument, 0)
            if cash > 0:
                context.order_value(context.symbol(instrument), cash)
    
    # Backtest engine: prepare data, executed only once
    def m42_prepare_bigquant_run(context):
        pass
    
    
    g = T.Graph({
    
        'm22': 'M.instruments.v2',
        'm22.start_date': '2011-01-01',
        'm22.end_date': '2013-12-31',
        'm22.market': 'CN_STOCK_A',
        'm22.instrument_list': '',
        'm22.max_count': 0,
    
        'm23': 'M.advanced_auto_labeler.v2',
        'm23.instruments': T.Graph.OutputPort('m22.data'),
    'm23.label_expr': """# Lines starting with # are comments
    # 0. One expression per line, executed in order; from the second line on, the label field can be used
    # 1. Available data fields: https://bigquant.com/docs/data_history_data.html
    #   Add the benchmark_ prefix to use the corresponding benchmark data
    # 2. Available operators and functions: `expression engine <https://bigquant.com/docs/big_expr.html>`_
    
    # Compute the return: the close 5 days ahead (sell price) divided by tomorrow's open (buy price), minus 1
    shift(close, -5) / shift(open, -1)-1
    
    # Handle extreme values: clip at the 1% and 99% quantiles
    clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
    
    # Filter out one-price limit-up days (set label to NaN; NaN labels are ignored in later processing and training)
    where(shift(high, -1) == shift(low, -1), NaN, label)
    """,
        'm23.start_date': '',
        'm23.end_date': '',
        'm23.benchmark': '000300.SHA',
        'm23.drop_na_label': True,
        'm23.cast_label_int': False,
    
        'm1': 'M.standardlize.v8',
        'm1.input_1': T.Graph.OutputPort('m23.data'),
        'm1.columns_input': 'label',
    
        'm24': 'M.input_features.v1',
        'm24.features': """close_0
    open_0
    high_0
    low_0 
    amount_0
    turn_0 
    return_0
     
    close_1
    open_1
    high_1
    low_1
    return_1
    amount_1
    turn_1
     
    close_2
    open_2
    high_2
    low_2
    amount_2
    turn_2
    return_2
     
    close_3
    open_3
    high_3
    low_3
    amount_3
    turn_3
    return_3
     
    close_4
    open_4
    high_4
    low_4
    amount_4
    turn_4
    return_4
     
    mean(close_0, 5)
    mean(low_0, 5)
    mean(open_0, 5)
    mean(high_0, 5)
    mean(turn_0, 5)
    mean(amount_0, 5)
    mean(return_0, 5)
     
    ts_max(close_0, 5)
    ts_max(low_0, 5)
    ts_max(open_0, 5)
    ts_max(high_0, 5)
    ts_max(turn_0, 5)
    ts_max(amount_0, 5)
    ts_max(return_0, 5)
     
    ts_min(close_0, 5)
    ts_min(low_0, 5)
    ts_min(open_0, 5)
    ts_min(high_0, 5)
    ts_min(turn_0, 5)
    ts_min(amount_0, 5)
    ts_min(return_0, 5) 
     
    std(close_0, 5)
    std(low_0, 5)
    std(open_0, 5)
    std(high_0, 5)
    std(turn_0, 5)
    std(amount_0, 5)
    std(return_0, 5)
     
    ts_rank(close_0, 5)
    ts_rank(low_0, 5)
    ts_rank(open_0, 5)
    ts_rank(high_0, 5)
    ts_rank(turn_0, 5)
    ts_rank(amount_0, 5)
    ts_rank(return_0, 5)
     
    decay_linear(close_0, 5)
    decay_linear(low_0, 5)
    decay_linear(open_0, 5)
    decay_linear(high_0, 5)
    decay_linear(turn_0, 5)
    decay_linear(amount_0, 5)
    decay_linear(return_0, 5)
     
    correlation(volume_0, return_0, 5)
    correlation(volume_0, high_0, 5)
    correlation(volume_0, low_0, 5)
    correlation(volume_0, close_0, 5)
    correlation(volume_0, open_0, 5)
    correlation(volume_0, turn_0, 5)
      
    correlation(return_0, high_0, 5)
    correlation(return_0, low_0, 5)
    correlation(return_0, close_0, 5)
    correlation(return_0, open_0, 5)
    correlation(return_0, turn_0, 5)
     
    correlation(high_0, low_0, 5)
    correlation(high_0, close_0, 5)
    correlation(high_0, open_0, 5)
    correlation(high_0, turn_0, 5)
     
    correlation(low_0, close_0, 5)
    correlation(low_0, open_0, 5)
    correlation(low_0, turn_0, 5)
     
    correlation(close_0, open_0, 5)
    correlation(close_0, turn_0, 5)
    
    correlation(open_0, turn_0, 5)""",
    
        'm27': 'M.general_feature_extractor.v7',
        'm27.instruments': T.Graph.OutputPort('m22.data'),
        'm27.features': T.Graph.OutputPort('m24.data'),
        'm27.start_date': '',
        'm27.end_date': '',
        'm27.before_start_days': 10,
    
        'm28': 'M.derived_feature_extractor.v3',
        'm28.input_data': T.Graph.OutputPort('m27.data'),
        'm28.features': T.Graph.OutputPort('m24.data'),
        'm28.date_col': 'date',
        'm28.instrument_col': 'instrument',
        'm28.drop_na': True,
        'm28.remove_extra_columns': False,
    
        'm34': 'M.standardlize.v8',
        'm34.input_1': T.Graph.OutputPort('m28.data'),
        'm34.input_2': T.Graph.OutputPort('m24.data'),
        'm34.columns_input': '[]',
    
        'm35': 'M.fillnan.v1',
        'm35.input_data': T.Graph.OutputPort('m34.data'),
        'm35.features': T.Graph.OutputPort('m24.data'),
        'm35.fill_value': '0.0',
    
        'm25': 'M.join.v3',
        'm25.data1': T.Graph.OutputPort('m1.data'),
        'm25.data2': T.Graph.OutputPort('m35.data'),
        'm25.on': 'date,instrument',
        'm25.how': 'inner',
        'm25.sort': True,
    
        'm2': 'M.dl_convert_to_bin.v2',
        'm2.input_data': T.Graph.OutputPort('m25.data'),
        'm2.features': T.Graph.OutputPort('m24.data'),
        'm2.window_size': 5,
        'm2.feature_clip': 3,
        'm2.flatten': False,
        'm2.window_along_col': 'instrument',
    
        'm26': 'M.instruments.v2',
        'm26.start_date': '2014-01-01',
        'm26.end_date': '2014-12-31',
        'm26.market': 'CN_STOCK_A',
        'm26.instrument_list': '',
        'm26.max_count': 0,
    
        'm29': 'M.general_feature_extractor.v7',
        'm29.instruments': T.Graph.OutputPort('m26.data'),
        'm29.features': T.Graph.OutputPort('m24.data'),
        'm29.start_date': '',
        'm29.end_date': '',
        'm29.before_start_days': 10,
    
        'm30': 'M.derived_feature_extractor.v3',
        'm30.input_data': T.Graph.OutputPort('m29.data'),
        'm30.features': T.Graph.OutputPort('m24.data'),
        'm30.date_col': 'date',
        'm30.instrument_col': 'instrument',
        'm30.drop_na': True,
        'm30.remove_extra_columns': False,
    
        'm37': 'M.standardlize.v8',
        'm37.input_1': T.Graph.OutputPort('m30.data'),
        'm37.input_2': T.Graph.OutputPort('m24.data'),
        'm37.columns_input': '[]',
    
        'm36': 'M.fillnan.v1',
        'm36.input_data': T.Graph.OutputPort('m37.data'),
        'm36.features': T.Graph.OutputPort('m24.data'),
        'm36.fill_value': '0.0',
    
        'm32': 'M.dl_convert_to_bin.v2',
        'm32.input_data': T.Graph.OutputPort('m36.data'),
        'm32.features': T.Graph.OutputPort('m24.data'),
        'm32.window_size': 5,
        'm32.feature_clip': 3,
        'm32.flatten': False,
        'm32.window_along_col': 'instrument',
    
        'm33': 'M.cached.v3',
        'm33.input_1': T.Graph.OutputPort('m2.data'),
        'm33.input_2': T.Graph.OutputPort('m32.data'),
        'm33.run': m33_run_bigquant_run,
        'm33.post_run': m33_post_run_bigquant_run,
        'm33.input_ports': '',
        'm33.params': """{
        "embed_dim": 128,
        "nhead": 8,
        "num_layers": 2,
        "dropout": 0.3
    }""",
        'm33.output_ports': '',
    
        'm41': 'M.cached.v3',
        'm41.input_1': T.Graph.OutputPort('m33.data_1'),
        'm41.input_2': T.Graph.OutputPort('m30.data'),
        'm41.run': m41_run_bigquant_run,
        'm41.post_run': m41_post_run_bigquant_run,
        'm41.input_ports': '',
        'm41.params': '{}',
        'm41.output_ports': '',
    
        'm42': 'M.trade.v4',
        'm42.instruments': T.Graph.OutputPort('m26.data'),
        'm42.options_data': T.Graph.OutputPort('m41.data_1'),
        'm42.benchmark_ds': T.Graph.OutputPort('m41.data_3'),
        'm42.start_date': '',
        'm42.end_date': '',
        'm42.initialize': m42_initialize_bigquant_run,
        'm42.handle_data': m42_handle_data_bigquant_run,
        'm42.prepare': m42_prepare_bigquant_run,
        'm42.volume_limit': 0.025,
        'm42.order_price_field_buy': 'open',
        'm42.order_price_field_sell': 'close',
        'm42.capital_base': 1000000,
        'm42.auto_cancel_non_tradable_orders': True,
        'm42.data_frequency': 'daily',
        'm42.price_type': '后复权',
        'm42.product_type': '股票',
        'm42.plot_charts': True,
        'm42.backtest_only': False,
        'm42.benchmark': '000300.SHA',
    })
    
    # g.run({})
    
    
    def m4_param_grid_builder_bigquant_run():
        param_grid = {}
    
        # set the candidate parameter values to search over here
        # param_grid['m3.features'] = ['close_1/close_0', 'close_2/close_0\nclose_3/close_0']
        param_grid['m33.params'] = [
                   """{"embed_dim": 64, "nhead": 8, "num_layers": 2, "dropout": 0.1}""",
                   """{"embed_dim": 128, "nhead": 8, "num_layers": 2, "dropout": 0.1}""",
                   """{"embed_dim": 256, "nhead": 8, "num_layers": 2, "dropout": 0.1}""",
                   """{"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.1}""",
                   """{"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.1}""",
                   """{"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.1}""",
                   """{"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.3}""",
                   """{"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.3}""",
                   """{"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.3}"""
                  ]
        
        return param_grid
    
    def m4_scoring_bigquant_run(result):
        # score: final Sharpe ratio read from the backtest module (m42)
        score = result.get('m42').read_raw_perf()['sharpe'].tail(1)[0]
    
        return {'score': score}
    
    
    m4 = M.hyper_parameter_search.v1(
        param_grid_builder=m4_param_grid_builder_bigquant_run,
        scoring=m4_scoring_bigquant_run,
        search_algorithm='网格搜索',
        search_iterations=10,
        workers=1,
        worker_distributed_run=False,
        worker_silent=False,
        run_now=True,
        bq_graph=g
    )
    
    Fitting 1 folds for each of 9 candidates, totalling 9 fits
    [Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.
    [CV 1/1; 1/9] START m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 2, "dropout": 0.1}
    
    • Return: 157.8%
    • Annualized return: 164.87%
    • Benchmark return: 51.66%
    • Alpha: 1.01
    • Beta: 0.67
    • Sharpe ratio: 3.86
    • Win rate: 0.65
    • Profit/loss ratio: 1.31
    • Return volatility: 25.34%
    • Information ratio: 0.16
    • Max drawdown: 9.19%
    [CV 1/1; 1/9] END m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 2, "dropout": 0.1}; total time= 8.7min
    [Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:  8.7min remaining:    0.0s
    [CV 1/1; 2/9] START m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 2, "dropout": 0.1}
    
    • Return: 115.09%
    • Annualized return: 119.85%
    • Benchmark return: 51.66%
    • Alpha: 0.73
    • Beta: 0.57
    • Sharpe ratio: 3.39
    • Win rate: 0.65
    • Profit/loss ratio: 1.28
    • Return volatility: 23.22%
    • Information ratio: 0.11
    • Max drawdown: 10.85%
    [CV 1/1; 2/9] END m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 2, "dropout": 0.1}; total time= 2.3min
    [Parallel(n_jobs=1)]: Done   2 out of   2 | elapsed: 11.0min remaining:    0.0s
    [CV 1/1; 3/9] START m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 2, "dropout": 0.1}
    
    • Return: 92.65%
    • Annualized return: 96.29%
    • Benchmark return: 51.66%
    • Alpha: 0.53
    • Beta: 0.6
    • Sharpe ratio: 2.67
    • Win rate: 0.64
    • Profit/loss ratio: 1.22
    • Return volatility: 25.38%
    • Information ratio: 0.07
    • Max drawdown: 16.12%
    [CV 1/1; 3/9] END m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 2, "dropout": 0.1}; total time= 1.5min
    [Parallel(n_jobs=1)]: Done   3 out of   3 | elapsed: 12.5min remaining:    0.0s
    [CV 1/1; 4/9] START m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.1}
    
    • Return: 137.44%
    • Annualized return: 143.38%
    • Benchmark return: 51.66%
    • Alpha: 0.89
    • Beta: 0.62
    • Sharpe ratio: 3.46
    • Win rate: 0.65
    • Profit/loss ratio: 1.29
    • Return volatility: 25.84%
    • Information ratio: 0.13
    • Max drawdown: 11.21%
    [CV 1/1; 4/9] END m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.1}; total time=  40.1s
    [Parallel(n_jobs=1)]: Done   4 out of   4 | elapsed: 13.2min remaining:    0.0s
    [CV 1/1; 5/9] START m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.1}
    
    • Return: 30.73%
    • Annualized return: 31.74%
    • Benchmark return: 51.66%
    • Alpha: -0.07
    • Beta: 0.81
    • Sharpe ratio: 1.21
    • Win rate: 0.51
    • Profit/loss ratio: 1.38
    • Return volatility: 22.46%
    • Information ratio: -0.06
    • Max drawdown: 17.08%
    [CV 1/1; 5/9] END m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.1}; total time=  51.8s
    [Parallel(n_jobs=1)]: Done   5 out of   5 | elapsed: 14.0min remaining:    0.0s
    [CV 1/1; 6/9] START m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.1}
    
    epoch 0   |  train_loss 1.00927|  vall_loss 0.98584|  0:01:46s
    epoch 1   |  train_loss 0.99702|  vall_loss 0.98999|  0:03:35s
    epoch 2   |  train_loss 0.99861|  vall_loss 0.99061|  0:05:23s
    epoch 3   |  train_loss 0.99856|  vall_loss 0.99055|  0:07:11s
    epoch 4   |  train_loss 0.99851|  vall_loss 0.99012|  0:09:01s
    epoch 5   |  train_loss 0.99848|  vall_loss 0.99056|  0:10:52s
    epoch 6   |  train_loss 0.99845|  vall_loss 0.99084|  0:12:51s
    epoch 7   |  train_loss 0.99845|  vall_loss 0.99093|  0:14:46s
    epoch 8   |  train_loss 0.99844|  vall_loss 0.99034|  0:16:46s
    epoch 9   |  train_loss 0.99841|  vall_loss 0.99038|  0:18:33s
    
    • Return: 53.03%
    • Annualized return: 54.9%
    • Benchmark return: 51.66%
    • Alpha: 0.19
    • Beta: 0.59
    • Sharpe ratio: 2.38
    • Win rate: 0.57
    • Profit/loss ratio: 1.4
    • Return volatility: 17.82%
    • Information ratio: 0.0
    • Max drawdown: 7.77%
    [CV 1/1; 6/9] END m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.1}; total time=41.6min
    [Parallel(n_jobs=1)]: Done   6 out of   6 | elapsed: 55.6min remaining:    0.0s
    [CV 1/1; 7/9] START m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.3}
    
    epoch 0   |  train_loss 1.01678|  vall_loss 0.98507|  0:01:52s
    epoch 1   |  train_loss 0.98982|  vall_loss 0.98561|  0:03:47s
    epoch 2   |  train_loss 0.98915|  vall_loss 0.98324|  0:05:37s
    epoch 3   |  train_loss 0.98919|  vall_loss 0.98176|  0:07:34s
    epoch 4   |  train_loss 0.98824|  vall_loss 0.98072|  0:09:33s
    epoch 5   |  train_loss 0.98794|  vall_loss 0.98108|  0:11:36s
    epoch 6   |  train_loss 0.98922|  vall_loss 0.98322|  0:13:24s
    epoch 7   |  train_loss 0.98725|  vall_loss 0.98317|  0:15:13s
    epoch 8   |  train_loss 0.98611|  vall_loss 0.98139|  0:16:58s
    epoch 9   |  train_loss 0.98467|  vall_loss 0.98176|  0:18:46s
    
    • Return: 132.67%
    • Annualized return: 138.35%
    • Benchmark return: 51.66%
    • Alpha: 0.83
    • Beta: 0.64
    • Sharpe ratio: 3.28
    • Win rate: 0.66
    • Profit/loss ratio: 1.19
    • Return volatility: 26.68%
    • Information ratio: 0.12
    • Max drawdown: 15.57%
    [CV 1/1; 7/9] END m33.params={"embed_dim": 64, "nhead": 8, "num_layers": 4, "dropout": 0.3}; total time=20.0min
    [Parallel(n_jobs=1)]: Done   7 out of   7 | elapsed: 75.6min remaining:    0.0s
    [CV 1/1; 8/9] START m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.3}
    
    epoch 0   |  train_loss 1.02812|  vall_loss 0.99750|  0:01:46s
    epoch 1   |  train_loss 0.99159|  vall_loss 1.00375|  0:03:43s
    epoch 2   |  train_loss 0.99581|  vall_loss 1.00686|  0:05:30s
    epoch 3   |  train_loss 0.99681|  vall_loss 1.00710|  0:07:21s
    epoch 4   |  train_loss 0.99675|  vall_loss 1.00694|  0:09:12s
    epoch 5   |  train_loss 0.99670|  vall_loss 1.00741|  0:11:02s
    epoch 6   |  train_loss 0.99669|  vall_loss 1.00606|  0:12:56s
    epoch 7   |  train_loss 0.99666|  vall_loss 1.00602|  0:14:44s
    epoch 8   |  train_loss 0.99666|  vall_loss 1.00648|  0:16:35s
    epoch 9   |  train_loss 0.99664|  vall_loss 1.00657|  0:18:37s
    
    • Return: 56.09%
    • Annualized return: 58.09%
    • Benchmark return: 51.66%
    • Alpha: 0.24
    • Beta: 0.57
    • Sharpe ratio: 2.06
    • Win rate: 0.61
    • Profit/loss ratio: 1.13
    • Return volatility: 21.99%
    • Information ratio: 0.01
    • Max drawdown: 12.15%
    [CV 1/1; 8/9] END m33.params={"embed_dim": 128, "nhead": 8, "num_layers": 4, "dropout": 0.3}; total time=21.6min
    [Parallel(n_jobs=1)]: Done   8 out of   8 | elapsed: 97.2min remaining:    0.0s
    [CV 1/1; 9/9] START m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.3}
    
    epoch 0   |  train_loss 1.03854|  vall_loss 0.99538|  0:01:59s
    epoch 1   |  train_loss 0.99559|  vall_loss 0.99731|  0:03:53s
    epoch 2   |  train_loss 0.99814|  vall_loss 0.99709|  0:05:47s
    epoch 3   |  train_loss 0.99793|  vall_loss 0.99797|  0:07:43s
    epoch 4   |  train_loss 0.99786|  vall_loss 0.99746|  0:09:41s
    epoch 5   |  train_loss 0.99779|  vall_loss 0.99708|  0:11:44s
    epoch 6   |  train_loss 0.99776|  vall_loss 0.99771|  0:13:37s
    epoch 7   |  train_loss 0.99775|  vall_loss 0.99720|  0:15:35s
    epoch 8   |  train_loss 0.99770|  vall_loss 0.99688|  0:17:32s
    epoch 9   |  train_loss 0.99771|  vall_loss 0.99709|  0:19:20s
    
    • Return: 37.35%
    • Annualized return: 38.6%
    • Benchmark return: 51.66%
    • Alpha: 0.07
    • Beta: 0.58
    • Sharpe ratio: 1.77
    • Win rate: 0.58
    • Profit/loss ratio: 1.25
    • Return volatility: 17.66%
    • Information ratio: -0.04
    • Max drawdown: 8.71%
    [CV 1/1; 9/9] END m33.params={"embed_dim": 256, "nhead": 8, "num_layers": 4, "dropout": 0.3}; total time=22.0min
    [Parallel(n_jobs=1)]: Done   9 out of   9 | elapsed: 119.2min remaining:    0.0s
    [Parallel(n_jobs=1)]: Done   9 out of   9 | elapsed: 119.2min finished
    
    • Return: 157.8%
    • Annualized return: 164.87%
    • Benchmark return: 51.66%
    • Alpha: 1.01
    • Beta: 0.67
    • Sharpe ratio: 3.86
    • Win rate: 0.65
    • Profit/loss ratio: 1.31
    • Return volatility: 25.34%
    • Information ratio: 0.16
    • Max drawdown: 9.19%