复制链接
克隆策略
In [38]:
# 回测引擎:初始化函数,只执行一次
class conf:
    """Global date-range configuration for the training and trading windows."""
    # Training-data range (alternatives tried earlier, kept for reference:
    # 2017-01-01, 2016-04-20, 2016-10-27, 2017-03-16, 2016-02-26 / 2021-12-31).
    start_date = '2013-02-01'
    end_date = '2019-10-30'
    # Live/trading evaluation range (earlier values: 2022-01-01 / 2022-03-31).
    start_trade = '2020-01-01'
    # Fixed: was '2022-04-31', which is not a valid calendar date (April has
    # only 30 days) and would break any date parsing of this value.
    end_trade = '2022-04-30'
# Cached step: derive moving-average trend flags used by the risk-control score.
def m89_run_bigquant_run(input_1, input_2, input_3):
    """Label each benchmark moving average as flat (0), rising (1) or falling (2).

    Reads the benchmark feature DataFrame from ``input_1``, compares each MA
    column against its values one and two rows earlier, and writes the result
    back as a DataSource.  ``input_2``/``input_3`` are unused.
    """
    frame = input_1.read()
    frame = frame.sort_values('date', ascending=True).reset_index(drop=True)

    ma_cols = ['bm_ma_3', 'bm_ma_10', 'bm_ma_20', 'bm_ma_30']
    prev_1 = frame[ma_cols].shift(1)  # MA values one row (trading day) earlier
    prev_2 = frame[ma_cols].shift(2)  # MA values two rows earlier

    for col in ma_cols:
        trend_col = col + '_trend'
        frame[trend_col] = 0

        # Day-over-day growth rates of the moving average (epsilon avoids /0).
        frame['rate1'] = (prev_1[col] - prev_2[col]) / (prev_2[col] + 0.00001)
        frame['rate2'] = (frame[col] - prev_1[col]) / (prev_1[col] + 0.00001)

        rising = (frame['rate1'] > 0.006) & (frame['rate2'] > 0.006)
        frame.loc[rising, trend_col] = 1  # trending up

        falling = (frame['rate1'] < -0.003) & (frame['rate2'] < -0.003)
        frame.loc[falling, trend_col] = 2  # trending down

    # NOTE(review): 'rate1'/'rate2' intentionally remain in the output frame
    # (holding the last MA column's values), matching the original behaviour.
    return Outputs(data_1=DataSource.write_df(frame))

# Post-processing hook (optional). Receives the main function's outputs; may
# transform them or return a friendlier format. Its result is not cached.
def m89_post_run_bigquant_run(outputs):
    """Identity post-run hook for m89: pass outputs through unchanged."""
    return outputs

    
def m4_run_bigquant_run(input_1, input_2, input_3):
    """Reshape the flattened training tensor into (samples, features, window).

    ``input_1`` holds a pickled dict-like object whose 'x' entry is a 2-D
    array; ``input_2`` holds the feature list, whose length fixes the second
    axis.  ``input_3`` is unused.
    """
    packed = input_1.read_pickle()
    n_features = len(input_2.read_pickle())

    samples = packed['x'].shape[0]
    window = int(packed['x'].shape[1] / n_features)
    packed['x'] = packed['x'].reshape(samples, int(n_features), window)

    return Outputs(data_1=DataSource.write_pickle(packed))

# Post-processing hook (optional). Receives the main function's outputs; may
# transform them or return a friendlier format. Its result is not cached.
def m4_post_run_bigquant_run(outputs):
    """Identity post-run hook for m4: pass outputs through unchanged."""
    return outputs

# Python entry point: input_1/2/3 map to the three input ports, data_1/2/3 to
# the three output ports.
def m8_run_bigquant_run(input_1, input_2, input_3):
    """Reshape the flattened prediction tensor into (samples, features, window).

    Mirrors m4_run_bigquant_run but runs on the prediction-side data.
    """
    data = input_1.read_pickle()
    feat_count = len(input_2.read_pickle())

    x = data['x']
    data['x'] = x.reshape(x.shape[0], int(feat_count), int(x.shape[1] / feat_count))

    return Outputs(data_1=DataSource.write_pickle(data))

# Post-processing hook (optional). Receives the main function's outputs; may
# transform them or return a friendlier format. Its result is not cached.
def m8_post_run_bigquant_run(outputs):
    """Identity post-run hook for m8: pass outputs through unchanged."""
    return outputs

# User-defined Keras layers must be registered in this dict, e.g.
# {
#   "MyLayer": MyLayer
# }
# Empty: the model uses only built-in layers.
m5_custom_objects_bigquant_run = {
    
}

# Python entry point: input_1/2/3 map to the three input ports, data_1/2/3 to
# the three output ports.
def m24_run_bigquant_run(input_1, input_2, input_3):
    """Join model predictions with instrument/date and rank within each day.

    ``input_1``: pickled prediction array; column 0 is the predicted label.
    ``input_2``: the feature DataFrame supplying 'instrument' and 'date'.
    Returns a DataFrame sorted by date ascending, prediction descending.
    """
    predictions = input_1.read_pickle()
    features = input_2.read_df()

    ranked = pd.DataFrame({
        'pred_label': predictions[:, 0],
        'instrument': features.instrument,
        'date': features.date,
    })
    ranked.sort_values(['date', 'pred_label'], ascending=[True, False], inplace=True)

    return Outputs(data_1=DataSource.write_df(ranked), data_2=None, data_3=None)

# Post-processing hook (optional). Receives the main function's outputs; may
# transform them or return a friendlier format. Its result is not cached.
def m24_post_run_bigquant_run(outputs):
    """Identity post-run hook for m24: pass outputs through unchanged."""
    return outputs

# Backtest engine: initialize hook, runs exactly once.
def m39_initialize_bigquant_run(context):
    """Load prediction data and configure commissions and position limits."""
    # Prediction data arrives through options; load it into a DataFrame.
    context.ranker_prediction = context.options['data'].read_df()

    # Override the platform's default commission/slippage settings.
    context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))

    # Single-instrument strategy: the full weight goes to the top-ranked stock.
    # (Removed an unused local `stock_count` and stale comments describing a
    # 5-stock weighting scheme — see m39_handle_data, which buys exactly one.)
    context.stock_weights = [1]
    # Maximum fraction of capital a single instrument may occupy.
    context.max_cash_per_instrument = 1
    # Holding period in days.
    context.options['hold_days'] = 1

# Backtest engine: daily handler, runs once per trading day.
def m39_handle_data_bigquant_run(context, data):
    """Rebalance daily: hold only today's top-ranked instrument, sized by score.

    Sells every held instrument that is not today's top prediction, then buys
    the top prediction (if not already held) with a cash budget scaled by the
    market risk score (``calculate_score``/10 of available cash).

    (Cleanup: removed a duplicate re-filtering of ``ranker_prediction``, an
    unused ``positions`` snapshot, and stale commented-out risk-control code.)
    """
    today = data.current_dt.strftime('%Y-%m-%d')

    # Today's slice of the prediction table (one row per candidate, already
    # sorted by pred_label descending by module m40).
    ranker_prediction = context.ranker_prediction[
        context.ranker_prediction.date == today]

    # ---------------- risk control: position sizing START ----------------
    # calculate_score yields 0..10; dividing by 10 gives the fraction of cash
    # to deploy today (score 10 -> fully invested).
    score = None
    try:
        score = calculate_score(ranker_prediction, 0) / 10
        print(today, '今天市场的得分:', score)
    except Exception as e:
        print('score数据读取出错!:' + str(e))

    cash_for_buy = context.portfolio.cash
    print('今日原来仓位:', str(cash_for_buy))

    # Fixed: the score-scaled budget was previously computed but never used —
    # orders were always placed with the full cash amount, and the log line
    # printed the unscaled value.
    cash_to_open = cash_for_buy * score if score is not None else cash_for_buy
    print('今日决定开仓仓位:', str(cash_to_open))
    # ---------------- risk control: position sizing END ----------------

    # The strategy holds at most one instrument: today's top prediction.
    buy_instruments = list(ranker_prediction.instrument)
    real_buy = buy_instruments[:1]

    # Symbols currently held in the portfolio.
    sell_instruments = [ins.symbol for ins in context.portfolio.positions.keys()]

    # Close every position that is not today's top pick.
    to_sell = set(sell_instruments) - set(real_buy)
    for instrument in to_sell:
        context.order_target(context.symbol(instrument), 0)

    # Open the top pick if it is not already held.
    if real_buy:
        to_buy = set(real_buy) - set(sell_instruments)
        for instrument in to_buy:
            context.order_value(context.symbol(instrument), cash_to_open)

def m39_prepare_bigquant_run(context):
    """Backtest engine prepare hook: preload ST-status and price-limit-status
    features over the backtest window.

    ``context.status_df`` is currently only referenced by the commented-out
    cancel-order logic in m39_before_trading_start_bigquant_run.
    """
    # Fetch ST status and limit-up/limit-down status for all instruments.
    context.status_df = D.features(instruments =context.instruments,start_date = context.start_date, end_date = context.end_date, 
                           fields=['st_status_0','price_limit_status_0','price_limit_status_1'])

def m39_before_trading_start_bigquant_run(context, data):
    """Backtest engine pre-open hook.

    Currently a no-op.  The commented-out logic below cancelled pending sell
    orders for instruments that closed limit-up, using the status data
    prepared in m39_prepare_bigquant_run.
    """
    pass     
#     # Fetch the price-limit status data
#     df_price_limit_status=context.status_df.set_index('date')
#     today=data.current_dt.strftime('%Y-%m-%d')
#     # Get the currently open (unfilled) orders
#     for orders in get_open_orders().values():
#         # Loop over them and cancel as needed
#         for _order in orders:
#             ins=str(_order.sid.symbol)
#             try:
#                 # If the stock is limit-up today, cancel any sell order
#                 if  df_price_limit_status[df_price_limit_status.instrument==ins].price_limit_status_0.loc[today]>2 and _order.amount<0:
#                     cancel_order(_order)
#                     print(today,'尾盘涨停取消卖单',ins) 
#             except:
#                 continue
  
    
def calculate_score(index_df, i=0):
    """Score the market regime on a 0-10 scale from benchmark MA features.

    Points are awarded when the close sits above each moving average (with
    partial credit for small shortfalls versus the 10/20-day MAs) and when the
    MA trend flags are rising; the total is capped at 10, and recent sharp-drop
    days (``bm_Collapse_sum`` > 0) deduct 1.5 points, floored at 0.

    Args:
        index_df: frame with bm_close, bm_ma_* , bm_ma_*_trend and
            bm_Collapse_sum columns (output of m89).
        i: row index to score (default: first row).
    """
    close = index_df['bm_close'].values[i]

    def banded(gap, full_points):
        # Full points above the MA; partial credit for small gaps below it.
        # NOTE(review): thresholds compare absolute price gaps, not returns —
        # confirm this is intended for the benchmark's price scale.
        if gap >= 0:
            return full_points
        if gap > -0.03:
            return 1
        if gap > -0.06:
            return 0.5
        return 0

    score = 0
    if close - index_df['bm_ma_3'].values[i] >= 0:
        score += 1
    score += banded(close - index_df['bm_ma_10'].values[i], 2)
    score += banded(close - index_df['bm_ma_20'].values[i], 2)
    if close - index_df['bm_ma_30'].values[i] >= 0:
        score += 1

    # Trend flags: 1 = rising, 2 = falling, 0 = neither.
    if index_df['bm_ma_3_trend'].values[i] == 1:
        score += 1
    score += {1: 2, 0: 1}.get(index_df['bm_ma_10_trend'].values[i], 0)
    score += {1: 1.5, 0: 0.5}.get(index_df['bm_ma_20_trend'].values[i], 0)

    # Clamp to the 0-10 scale before applying the crash deduction.
    score = min(score, 10)
    if index_df['bm_Collapse_sum'].values[i] > 0:
        score = max(score - 1.5, 0)

    return score

# Strategy graph: training universe -> labeling/features -> CNN train/predict
# -> benchmark risk features -> backtest (m39).
# Fixes: 'm7' referenced 'M.join.v' (missing version; every other join uses
# .v3) and two m32 parameter keys were missing the '.' separator.
g = T.Graph({

    # --- training universe ---
    'm1': 'M.instruments.v2',
    'm1.start_date': '2013-02-01',
    'm1.end_date':'2019-10-30',
    'm1.market': 'CN_STOCK_A',
    'm1.instrument_list': '',
    'm1.max_count': 0,


    # --- money-flow filter for the training universe ---
'm21' : 'M.use_datasource.v1',
    'm21.instruments':T.Graph.OutputPort('m1.data'),
    'm21.datasource_id':'net_amount_CN_STOCK_A',
    'm21.start_date':'',
    'm21.end_date':'',


'm22': 'M.filter.v3',
    'm22.input_data':T.Graph.OutputPort('m21.data'),
    'm22.expr':'mf_net_amount_l>18000000',
    'm22.output_left_data':False,


'm23':'M.select_columns.v3',
    'm23.input_ds':T.Graph.OutputPort('m22.data'),
    'm23.columns':'date,instrument',
    'm23.reverse_select':False,


'm20': 'M.use_datasource.v1',
    'm20.instruments':T.Graph.OutputPort('m1.data'),
    'm20.datasource_id':'bar1d_CN_STOCK_A',
    'm20.start_date':'',
    'm20.end_date':'',


'm29' : 'M.join.v3',
    'm29.data1':T.Graph.OutputPort('m20.data'),
    'm29.data2':T.Graph.OutputPort('m23.data'),
    'm29.on':'date,instrument',
    'm29.how':'inner',
    'm29.sort':False,


    # --- label: 3-day high vs next open, clipped, limit-up days dropped ---
'm31' : 'M.auto_labeler_on_datasource.v1',
    'm31.input_data':T.Graph.OutputPort('m29.data'),
    'm31.label_expr':"""# #号开始的表示注释
# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
# 1. 可用数据字段见 https://bigquant.com/docs/develop/datasource/deprecated/history_data.html
# 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/develop/bigexpr/usage.html>`_

# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
#shift(close, -5) / shift(open, -1)

# 极值处理:用1%和99%分位的值做clip
#clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))

# 将分数映射到分类,这里使用20个分类
#all_wbins(label, 20)

# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
#where(shift(high, -1) == shift(low, -1), NaN, label)
# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
shift(high, -3) / shift(open, -1)-1

# 极值处理:用1%和99%分位的值做clip
clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))

# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
where(shift(high, -1) == shift(low, -1), NaN, label)
#where(label>0.5, NaN, label)
#where(label<-0.5, NaN, label)
""",
    'm31.drop_na_label':True,
    'm31.cast_label_int':False,
    'm31.date_col':'date',
    'm31.instrument_col':'instrument',


    # --- model input features (m3) and full feature/condition set (m25) ---
'm3' : 'M.input_features.v1',
    'm3.features':"""
    open_1
     """,


    # NOTE(review): 'cond3' is defined twice inside this expression list
    # (identical text) — harmless duplicate, but worth removing upstream.
'm25':'M.input_features.v1',
    'm25.features_ds':T.Graph.OutputPort('m3.data'),
    'm25.features':"""
open_1
close_1
close_0
high_1
open_0
low_0

price_limit_status_0
volume_0
open_0/close_1
cond3=low_0 > mean(close_0,20)
#(今日收盘价-昨日收盘价)/昨日收盘价*100%

cond=list_board_0
cond1=ta_trix(close_0, derive='long')
cond2=ta_dma(close_0, 'long')
#----当日最低价 站稳60日线
cond3=low_0 > mean(close_0,20)
#(今日收盘价-昨日收盘价)/昨日收盘价*100%
cond4= (close_0-close_1)/close_1 >0.04
cond5=True
cond6=st_status_0==0
cond7=ta_macd(close_0,'long')
cond8=ta_ma(close_0,5, derive='long')""",


'm15': 'M.general_feature_extractor.v7',
    'm15.instruments':T.Graph.OutputPort('m1.data'),
    'm15.features':T.Graph.OutputPort('m25.data'),
    'm15.start_date':'',
    'm15.end_date':'',
    'm15.before_start_days':90,


'm16' : 'M.derived_feature_extractor.v3',
    'm16.input_data':T.Graph.OutputPort('m15.data'),
    'm16.features':T.Graph.OutputPort('m25.data'),
    'm16.date_col':'date',
    'm16.instrument_col':'instrument',
    'm16.drop_na':True,
    'm16.remove_extra_columns':False,

    # Fixed: was 'M.join.v' (missing version number); all other joins in this
    # graph (m29/m36/m41) use v3.
'm7' : 'M.join.v3',
    'm7.data1':T.Graph.OutputPort('m31.data'),
    'm7.data2':T.Graph.OutputPort('m16.data'),
    'm7.on':'date,instrument',
    'm7.how':'inner',
    'm7.sort':False,

'm2' : 'M.filter.v3',
    'm2.input_data':T.Graph.OutputPort('m7.data'),
    'm2.expr':'cond5 and cond4 and  cond6 and cond7 and cond8',
    'm2.output_left_data':False,

'm38' : 'M.features_short.v1',
    'm38.input_1':T.Graph.OutputPort('m3.data'),


    # NOTE(review): the training path feeds m3.data (the raw feature list)
    # into dl_convert_to_bin, while the parallel prediction path (m27) feeds
    # the filtered dataset m37.data — m2.data may have been intended here.
    # Also feature_clip is -2 here but 2 in m27 — confirm which is correct.
'm26' : 'M.dl_convert_to_bin.v2',
    'm26.input_data':T.Graph.OutputPort('m3.data'),
    'm26.features':T.Graph.OutputPort('m38.data_1'),
    'm26.window_size':2,
    'm26.feature_clip':-2,
    'm26.flatten':True,
    'm26.window_along_col':'instrument',


'm4' : 'M.cached.v3',
    'm4.input_1':T.Graph.OutputPort('m26.data'),
    'm4.input_2':T.Graph.OutputPort('m38.data_1'),
    'm4.run':m4_run_bigquant_run,
    'm4.post_run':m4_post_run_bigquant_run,
    'm4.input_ports':'',
   'm4.params':'{}',
    'm4.output_ports':'',


    # --- prediction universe ---
'm9' : 'M.instruments.v2',
    'm9.start_date':'2022-01-01',
    'm9.end_date':'2022-05-24',
    'm9.market':'CN_STOCK_A',
    'm9.instrument_list':'',
    'm9.max_count':0,


'm17' : 'M.general_feature_extractor.v7',
    'm17.instruments':T.Graph.OutputPort('m9.data'),
    'm17.features':T.Graph.OutputPort('m25.data'),
    'm17.start_date':'',
    'm17.end_date':'',
    'm17.before_start_days':90,


'm18' : 'M.derived_feature_extractor.v3',
    'm18.input_data':T.Graph.OutputPort('m17.data'),
    'm18.features':T.Graph.OutputPort('m25.data'),
    'm18.date_col':'date',
    'm18.instrument_col':'instrument',
    'm18.drop_na':True,
    'm18.remove_extra_columns':False,


'm13' : 'M.use_datasource.v1',
    'm13.instruments':T.Graph.OutputPort('m9.data'),
    'm13.datasource_id':'net_amount_CN_STOCK_A',
    'm13.start_date':'',
    'm13.end_date':'',


'm14' : 'M.filter.v3',
    'm14.input_data':T.Graph.OutputPort('m13.data'),
    'm14.expr':'mf_net_amount_l>18000000',
    'm14.output_left_data':False,

'm35' : 'M.select_columns.v3',
    'm35.input_ds':T.Graph.OutputPort('m14.data'),
    'm35.columns':'date,instrument',
    'm35.reverse_select':False,


'm36' : 'M.join.v3',
    'm36.data1':T.Graph.OutputPort('m18.data'),
    'm36.data2':T.Graph.OutputPort('m35.data'),
    'm36.on':'date,instrument',
    'm36.how':'inner',
    'm36.sort':False,


'm37' : 'M.filter.v3',
    'm37.input_data':T.Graph.OutputPort('m36.data'),
    'm37.expr':'cond5 and cond4 and  cond6 and cond7 and cond8',
    'm37.output_left_data':False,

'm27' : 'M.dl_convert_to_bin.v2',
    'm27.input_data':T.Graph.OutputPort('m37.data'),
    'm27.features':T.Graph.OutputPort('m38.data_1'),
    'm27.window_size':2,
    'm27.feature_clip':2,
    'm27.flatten':True,
    'm27.window_along_col':'instrument',


'm8' : 'M.cached.v3',
    'm8.input_1':T.Graph.OutputPort('m27.data'),
    'm8.input_2':T.Graph.OutputPort('m38.data_1'),
    'm8.run':m8_run_bigquant_run,
    'm8.post_run':m8_post_run_bigquant_run,
    'm8.input_ports':'',
    'm8.params':'{}',
    'm8.output_ports':'',


    # --- 1-D CNN model: input -> conv -> pool -> conv -> pool -> gmp -> dense ---
'm6' : 'M.dl_layer_input.v1',
    'm6.shape':'23,2',
    'm6.batch_shape':'',
    'm6.dtype':'float32',
    'm6.sparse':False,
    'm6.name':'',


'm10' : 'M.dl_layer_conv1d.v1',
    'm10.inputs':T.Graph.OutputPort('m6.data'),
    'm10.filters':32,
    'm10.kernel_size':'5',
    'm10.strides':'1',
    'm10.padding':'valid',
    'm10.dilation_rate':1,
    'm10.activation':'relu',
    'm10.use_bias':True,
    'm10.kernel_initializer':'glorot_uniform',
    'm10.bias_initializer':'Zeros',
    'm10.kernel_regularizer':'None',
    'm10.kernel_regularizer_l1':0,
    'm10.kernel_regularizer_l2':0,
    'm10.bias_regularizer':'None',
    'm10.bias_regularizer_l1':0,
    'm10.bias_regularizer_l2':0,
    'm10.activity_regularizer':'None',
    'm10.activity_regularizer_l1':0,
    'm10.activity_regularizer_l2':0,
    'm10.kernel_constraint':'None',
    'm10.bias_constraint':'None',
    'm10.name':'',


'm12' : 'M.dl_layer_maxpooling1d.v1',
    'm12.inputs':T.Graph.OutputPort('m10.data'),
    'm12.pool_size':1,
    'm12.padding':'valid',
    'm12.name':'',


'm32' : 'M.dl_layer_conv1d.v1',
    'm32.inputs':T.Graph.OutputPort('m12.data'),
    'm32.filters':32,
    'm32.kernel_size':'3',
    'm32.strides':'1',
    'm32.padding':'valid',
    'm32.dilation_rate':1,
    'm32.activation':'relu',
    'm32.use_bias':True,
    'm32.kernel_initializer':'glorot_uniform',
    'm32.bias_initializer':'Zeros',
    'm32.kernel_regularizer':'None',
    # Fixed: keys were 'm32kernel_regularizer_l1'/'_l2' (missing the '.'
    # separator), so they did not address module m32 at all.
    'm32.kernel_regularizer_l1':0,
    'm32.kernel_regularizer_l2':0,
    'm32.bias_regularizer':'None',
    'm32.bias_regularizer_l1':0,
    'm32.bias_regularizer_l2':0,
    'm32.activity_regularizer':'None',
    'm32.activity_regularizer_l1':0,
    'm32.activity_regularizer_l2':0,
    'm32.kernel_constraint':'None',
    'm32.bias_constraint':'None',
    'm32.name':'',


'm33' : 'M.dl_layer_maxpooling1d.v1',
    'm33.inputs':T.Graph.OutputPort('m32.data'),
    'm33.pool_size':1,
    'm33.padding':'valid',
    'm33.name':'',


'm28' : 'M.dl_layer_globalmaxpooling1d.v1',
    'm28.inputs':T.Graph.OutputPort('m33.data'),
    'm28.name':'',

'm30' : 'M.dl_layer_dense.v1',
    'm30.inputs':T.Graph.OutputPort('m28.data'),
    'm30.units':1,
    'm30.activation':'linear',
    'm30.use_bias':True,
    'm30.kernel_initializer':'glorot_uniform',
    'm30.bias_initializer':'Zeros',
    'm30.kernel_regularizer':'None',
    'm30.kernel_regularizer_l1':0,
    'm30.kernel_regularizer_l2':0,
    'm30.bias_regularizer':'None',
    'm30.bias_regularizer_l1':0,
    'm30.bias_regularizer_l2':0,
    'm30.activity_regularizer':'None',
    'm30.activity_regularizer_l1':0,
    'm30.activity_regularizer_l2':0,
    'm30.kernel_constraint':'None',
    'm30.bias_constraint':'None',
    'm30.name':'',


'm34' :'M.dl_model_init.v1',
    'm34.inputs':T.Graph.OutputPort('m6.data'),
    'm34.outputs':T.Graph.OutputPort('m30.data'),


'm5' : 'M.dl_model_train.v1',
    'm5.input_model':T.Graph.OutputPort('m34.data'),
    'm5.training_data':T.Graph.OutputPort('m4.data_1'),
    'm5.optimizer':'RMSprop',
    'm5.loss':'mean_squared_error',
    'm5.metrics':'mae',
    'm5.batch_size':10240,
    'm5.epochs':5,
    'm5.custom_objects':m5_custom_objects_bigquant_run,
    'm5.n_gpus':0,
    'm5.verbose':'2:每个epoch输出一行记录',


'm11' : 'M.dl_model_predict.v1',
    'm11.trained_model':T.Graph.OutputPort('m5.data'),
    'm11.input_data':T.Graph.OutputPort('m8.data_1'),
    'm11.batch_size':1024,
    'm11.n_gpus':0,
    'm11.verbose':'2:每个epoch输出一行记录',


'm24' : 'M.cached.v3',
    'm24.input_1':T.Graph.OutputPort('m11.data'),
    'm24.input_2':T.Graph.OutputPort('m37.data'),
    'm24.run':m24_run_bigquant_run,
    'm24.post_run':m24_post_run_bigquant_run,
    'm24.input_ports':'',
    'm24.params':'{}',
    'm24.output_ports':'',

    # --- benchmark (CSI 300) risk features for position sizing ---
'm44' : 'M.input_features.v1',
    'm44.features':"""
# #号开始的表示注释,注释需单独一行
# 多个特征,每行一个,可以包含基础特征和衍生特征,特征须为本平台特征
#bm_0 = where(close/shift(close,5)-1<-0.05,1,0)

bm_0=where(ta_macd_dif(close,2,4,4)-ta_macd_dea(close,2,4,4)<0,1,0)
bm_close=close
bm_pre_close=shift(close, 1)
bm_rate=(close-shift(close, 1))/shift(close, 1)
bm_Collapse=bm_rate<-0.02
bm_Collapse_sum=sum(bm_Collapse,4)
bm_ma_3=mean(close, 3)
bm_ma_10=mean(close, 10)
bm_ma_20=mean(close, 20)
bm_ma_30=mean(close, 30)
""",

'm43' : 'M.index_feature_extract.v3',
    'm43.input_1':T.Graph.OutputPort('m9.data'),
    'm43.input_2':T.Graph.OutputPort('m44.data'),
    'm43.before_days':100,
    'm43.index':'000300.HIX',

'm42' : 'M.select_columns.v3',
    'm42.input_ds':T.Graph.OutputPort('m43.data_1'),
    'm42.columns':'date,bm_0,bm_close,bm_pre_close,bm_rate,bm_Collapse,bm_Collapse_sum,bm_ma_3,bm_ma_10,bm_ma_20,bm_ma_30',
    'm42.reverse_select':False,

'm89' : 'M.cached.v3',
    'm89.input_1':T.Graph.OutputPort('m42.data'),
    'm89.run':m89_run_bigquant_run,
    'm89.post_run':m89_post_run_bigquant_run,
    'm89.input_ports':'',
    'm89.params':'{}',
    'm89.output_ports':'',

'm41' : 'M.join.v3',
    'm41.data1':T.Graph.OutputPort('m24.data'),
    'm41.data2':T.Graph.OutputPort('m89.data'),
    'm41.on':'date',
    'm41.how':'left',
    'm41.sort':False,

'm40' : 'M.sort.v4',
    'm40.input_ds':T.Graph.OutputPort('m41.data'),
    'm40.sort_by':'pred_label',
    'm40.group_by':'date',
    'm40.keep_columns':'--',
    'm40.ascending':False,

    # --- backtest ---
'm39' :'M.trade.v4',
    'm39.instruments':T.Graph.OutputPort('m9.data'),
    'm39.options_data':T.Graph.OutputPort('m40.sorted_data'),
    'm39.start_date':'',
    'm39.end_date':'',
    'm39.initialize':m39_initialize_bigquant_run,
    'm39.handle_data':m39_handle_data_bigquant_run,
    'm39.prepare':m39_prepare_bigquant_run,
    'm39.before_trading_start':m39_before_trading_start_bigquant_run,
    'm39.volume_limit':0,
    'm39.order_price_field_buy':'open',
    'm39.order_price_field_sell':'close',
    'm39.capital_base':30000,
    'm39.auto_cancel_non_tradable_orders':True,
    'm39.data_frequency':'daily',
    'm39.price_type':'真实价格',
    'm39.product_type':'股票',
    'm39.plot_charts':True,
    'm39.backtest_only':False,
    'm39.benchmark':'000300.SHA',

})

# g.run({})


# def m17_run_bigquant_run(bq_graph, inputs):
#     #Result = pd.read_csv("因子test1组批量测试-排序结果.csv",index_col=0)
#     #Result_feature = list(Result["新增因子"])
#     factor_last=[]#做一个空列表储存已经测试过的因子
#     factor_pool=['sum(low_0/close_0,10)/sum(low_0/close_0,20)',
# 'sum(high_0/close_0,20)/sum(close_0/low_0,10)',
# 'correlation(turn_0,return_0,5)',
# 'rank(mean(amount_0/deal_number_0,10))/rank(mean(amount_0/deal_number_0,5))',
# 'sum(high_0/close_0,5)/sum(high_0/close_0,20)',
# 'ta_wma_20_0/ta_wma_5_0',
# 'correlation(volume_0,return_0,5)',
# 'alpha21=-1*sign(ta_stoch_slowk_5_3_0_3_0_0-ta_stoch_slowd_5_3_0_3_0_0)/price_limit_status_6',
# 'alpha22=close_0/turn_1-close_1/turn_3-close_2/turn_5',
# 'alpha23=daily_return_3/rank_avg_amount_3',
# 'alpha24=close_0*avg_turn_0+close_1*avg_turn_1+close_2*avg_turn_2',
# 'alpha25=(sum((close_0-open_0)/open_0>0.03,5)+sum((close_0-open_0)/open_0>0.03,10)+sum((close_0-open_0)/open_0>0.03,60))/std(sum((close_0-open_0)/open_0>0.03,5), 3)',
# 'alpha26=std(amount_0,6)']
  


#     batch_num = 2# 多少20组,需要跑多少组策略100
#     batch_factor = list()
#     for i in range(batch_num):
#         random.seed(i)
#         factor_num = 1 # 每组多少个因子
#         batch_factor.append(random.sample(factor_pool, factor_num))

#     parameters_list = []
     
#     for feature in  batch_factor:
#         if feature in factor_last:
#             print("continue")
#             continue
#         print(feature)
#         factor_last.append(feature)
# #         Result['因子数']=len(feature)#这里计数总共有测试了多少个因子
# #         Result['新增因子']=[feature]#这里记录新测试的是哪个因子
# #         Result.to_csv('因子表.csv',header=['新增因子','因子数'],mode='a')#把测试好的因子追加写入因子表
#         parameters = {'m25.features': '\n'.join(feature)}
#         parameters_list.append({'parameters': parameters})
        
    
#     def run(parameters):
#         try:
#             return g.run(parameters)
#         except Exception as e:
#             print('ERROR --------', e)
#             return None
 
#     results = T.parallel_map(run, parameters_list, max_workers=2, remote_run=True, silent=True)  # 任务数  # 是否远程  #
#     #---  第一步:下面我们把搜索出来的 因子回测 结果读取出来,放在一个csv文件中储存好
# #     print("results="+str(results))
#       return_5/return_20
# rank_amount_5
# avg_turn_10
# market_cap_float_0<280000000000
# pe_ttm_0>0
# pb_lf_0
# sum(mf_net_pct_main_0>0.12,30)>11
# fs_roa_ttm_0>5
# fs_cash_ratio_0
# close_0>ts_max(close_0,56)
# ta_sma_10_0/ta_sma_30_0
# ta_sar_0
# swing_volatility_10_0/swing_volatility_60_0 
# ta_cci_14_0
# rank_return_3 
# mf_net_amount_0>mf_net_amount_1 
# mf_net_amount_xl_0>mean(mf_net_amount_xl_0, 30)
# cond4= (close_0-close_1)/close_1 >0.05
# (close_0-close_30)/close_30>1.25
# (close_0-close_5)/close_5>1.16
# list_days_0>365
# ta_bbands_middleband_28_0 
# cond28=sum(price_limit_status_0==3,80)>5
#     return results #所有进程结束后返回搜索出来的表

def m19_param_grid_builder_bigquant_run():
    """Build the hyper-parameter grid for the m19 search.

    A random batch of candidate factor expressions is sampled from
    ``factor_pool`` (kept from earlier experiments; currently discarded), and
    the returned grid pins ``m3.features`` to the fixed ``standard`` feature
    set only.

    Returns:
        dict: {'m3.features': [<feature-expression string>]}
    """
    param_grid = {}
    # Candidate factor expressions for random search.
    # Fixed: a missing comma between the 'ta_sma2(...,9,1)' and
    # 'std(volume_0,10)' entries caused implicit string concatenation,
    # silently merging two factors into one invalid expression.
    # NOTE(review): the pool contains duplicates (e.g. 'std(turn_0,90)',
    # 'beta_csi300_5_0') and 'alpha1' calls 'a_sma2' (likely 'ta_sma2') —
    # left unchanged since this pool does not affect the returned grid.
    factor_pool = [
        'sum(low_0/close_0,10)/sum(low_0/close_0,20)',
        'sum(high_0/close_0,20)/sum(close_0/low_0,10)',
        'correlation(turn_0,return_0,5)',
        'rank(mean(amount_0/deal_number_0,10))/rank(mean(amount_0/deal_number_0,5))',
        'sum(high_0/close_0,5)/sum(high_0/close_0,20)',
        'ta_wma_20_0/ta_wma_5_0',
        'mean(mf_net_amount_0,90)',
        'mean(mf_net_amount_l_0,30)',
        'mean(mf_net_amount_l_0,60)',
        'alpha21=-1*sign(ta_stoch_slowk_5_3_0_3_0_0-ta_stoch_slowd_5_3_0_3_0_0)/price_limit_status_6',
        'alpha22=close_0/turn_1-close_1/turn_3-close_2/turn_5',
        'alpha23=daily_return_3/rank_avg_amount_3',
        'alpha24=close_0*avg_turn_0+close_1*avg_turn_1+close_2*avg_turn_2',
        'alpha25=(sum((close_0-open_0)/open_0>0.03,5)+sum((close_0-open_0)/open_0>0.03,10)+sum((close_0-open_0)/open_0>0.03,60))/std(sum((close_0-open_0)/open_0>0.03,5), 3)',
        'std(amount_0,6)',
        'scale(((correlation(mean(volume_0,20),low_0,5)+((high_0+low_0)/2))-close_0))',
        'fs_deducted_profit_ttm_0/market_cap_0',
        'std(turn_0,90)',
        'std(turn_0,90)',
        'mean(mf_net_pct_l_0,5)',
        'mean(mf_net_pct_l_0,10)',
        'beta_csi300_5_0',
        'beta_sse50_120_0',
        'beta_sse50_180_0',
        '(ts_rank((volume_0/mean(volume_0,20)),20)*ts_rank((-1*delta(close_0,7)),8))',
        'alpha1=a_sma2(volume_0*((close_0-low_0)-(high_0-close_0))/(high_0-low_0),11,2)-ta_sma2(volume_0*((close_0-low_0)-(high_0-close_0))/(high_0-low_0),4,2)',
        'alpha2=(rank(((high_0+low_0+close_0+open_0)/4-close_0))/rank(((high_0+low_0+close_0+open_0)/4+close_0)))',
        'mean(mf_net_pct_s_0,5)',
        'alpha3=((rank(rank(rank(decay_linear((-1*rank(rank(delta(close_0,10)))),10))))+rank((-1*delta(close_0,3))))+sign(scale(correlation(mean(volume_0,20),low_0,12))))',
        'alpha4=(0-(1*((2*scale(rank((((close_0-low_0)-(high_0-close_0))/(high_0-low_0)*volume_0))))-scale(rank(ts_argmax(close_0,10))))))',
        'alpha5=(rank(delay((high_0-low_0)/(sum(close_0,5)/5),2))*rank(rank(volume_0)))/((high_0-low_0)/(sum(close_0,5)/5))/(((high_0+low_0+open_0+close_0)*0.25)-close_0)',
        ' (-1*delta((((close_0-low_0)-(high_0-close_0))/(high_0-low_0)),1))',
        'ta_sma2((ts_max(high_0,6)-close_0)/(ts_max(high_0,6)-ts_min(low_0,6))*100,9,1)',
        'std(volume_0,10)',
        'mean(mf_net_amount_0,120)',
        'ta_sma2((ts_max(high_0,6)-close_0)/(ts_max(high_0,6)-ts_min(low_0,6))*100,15,1)',
        ' beta_sse50_90_0',
        'ta_sma2((ts_max(high_0,6)-close_0)/(ts_max(high_0,6)-ts_min(low_0,6))*100,20,1)',
        'mean(mf_net_amount_l_0,90)',
        'beta_sse50_10_0',
        'alpha6=rank((((high_0+low_0+open_0+close_0)*0.25)-close_0))/rank((((high_0+low_0+open_0+close_0)*0.25)+close_0))',
        'alpha7=(ts_rank(volume_0/mean(volume_0,20),20)*ts_rank((-1*delta(close_0,7)),8))',
        'alpha8=((rank(decay_linear(delta((open_0+close_0+high_0+low_0)/4,4),7))+ts_rank(decay_linear(((((low_0*0.9)+(low_0*0.1))-(high_0+low_0+open_0+close_0)/4)/(open_0-((high_0+low_0)/2))),11),7))*-1)',
        'ta_adx_14_0',
        'beta_gem_90_0',
        'mean(mf_net_pct_l_0,180)',
        'beta_csi300_10_0',
        'ta_aroon_up_14_0',
        'beta_csi300_5_0',
        'rank_volatility_10_0',
        'mean(mf_net_pct_l_0,120)',
        'beta_sse50_120_0',
        'ta_aroon_up_28_0',
        'volatility_30_0',
        'beta_sse50_180_0',
        'rank_swing_volatility_60_0',
        'beta_gem_120_0',
        'beta_csi300_30_0',
        'beta_csi300_30_0',
        'rank_swing_volatility_120_0',
        'volatility_60_0',
        'beta_gem_180_0',
        '(-1*rank(covariance(rank(high_0),rank(volume_0),5)))',
    ]

    batch_num = 1  # number of random factor batches to draw
    batch_factor = list()
    for i in range(batch_num):
        # random.seed(i)  # uncomment for reproducible sampling
        num = random.randint(1, 3)  # factors per batch
        sampled = random.sample(factor_pool, num)
        block = '''
      
        
        '''
        for expr in sampled:
            block = block + expr + '\n'
        batch_factor.append(block)
    # NOTE(review): batch_factor is built but never used below — the grid is
    # pinned to the fixed `standard` feature set (benchmark: ~349% return).

    standard = ["""return_5/return_20
rank_amount_5
avg_turn_10
market_cap_float_0<280000000000
pe_ttm_0>0
pb_lf_0
sum(mf_net_pct_main_0>0.12,30)>11
fs_roa_ttm_0>5
fs_cash_ratio_0
close_0>ts_max(close_0,56)
ta_sma_10_0/ta_sma_30_0
ta_sar_0
swing_volatility_10_0/swing_volatility_60_0 
ta_cci_14_0
rank_return_3 
mf_net_amount_0>mf_net_amount_1 
mf_net_amount_xl_0>mean(mf_net_amount_xl_0, 30)
cond4= (close_0-close_1)/close_1 >0.05
(close_0-close_30)/close_30>1.25
(close_0-close_5)/close_5>1.16
list_days_0>365
ta_bbands_middleband_28_0 
cond28=sum(price_limit_status_0==3,80)>5
"""]
    param_grid["m3.features"] = standard
    return param_grid
def m19_scoring_bigquant_run(result):
    """Score one hyper-parameter candidate by the final Sharpe ratio of m40's
    backtest performance series."""
    perf = result.get('m40').read_raw_perf()
    latest_sharpe = perf['sharpe'].tail(1)[0]
    return {'score': latest_sharpe}
# Hyper-parameter grid search: runs graph `g` once per parameter combination
# produced by the builder and ranks each run with the scoring function.
# NOTE(review): the appended notebook traceback shows this very call failing
# with "AttributeError: 'int' object has no attribute 'startswith'" — one of
# the arguments apparently has a type this platform version does not expect;
# verify against the M.hyper_parameter_search.v1 API before re-running.
m19 = M.hyper_parameter_search.v1(
    param_grid_builder=m19_param_grid_builder_bigquant_run,
    scoring=m19_scoring_bigquant_run,
    search_algorithm='网格搜索',
    search_iterations=10,
    workers=1,
    worker_distributed_run=False,
    worker_silent=False,
    run_now=True,
    bq_graph=g
)
# m19 = M.hyper_run.v1(
#     run=m19_run_bigquant_run,
#     run_now=True,
#     bq_graph=g
# )
Fitting 1 folds for each of 1 candidates, totalling 1 fits
[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.
[CV 1/1; 1/1] START m3.features=return_5/return_20
rank_amount_5
avg_turn_10
market_cap_float_0<280000000000
pe_ttm_0>0
pb_lf_0
sum(mf_net_pct_main_0>0.12,30)>11
fs_roa_ttm_0>5
fs_cash_ratio_0
close_0>ts_max(close_0,56)
ta_sma_10_0/ta_sma_30_0
ta_sar_0
swing_volatility_10_0/swing_volatility_60_0 
ta_cci_14_0
rank_return_3 
mf_net_amount_0>mf_net_amount_1 
mf_net_amount_xl_0>mean(mf_net_amount_xl_0, 30)
cond4= (close_0-close_1)/close_1 >0.05
(close_0-close_30)/close_30>1.25
(close_0-close_5)/close_5>1.16
list_days_0>365
ta_bbands_middleband_28_0 
cond28=sum(price_limit_status_0==3,80)>5

[CV 1/1; 1/1] END m3.features=return_5/return_20
rank_amount_5
avg_turn_10
market_cap_float_0<280000000000
pe_ttm_0>0
pb_lf_0
sum(mf_net_pct_main_0>0.12,30)>11
fs_roa_ttm_0>5
fs_cash_ratio_0
close_0>ts_max(close_0,56)
ta_sma_10_0/ta_sma_30_0
ta_sar_0
swing_volatility_10_0/swing_volatility_60_0 
ta_cci_14_0
rank_return_3 
mf_net_amount_0>mf_net_amount_1 
mf_net_amount_xl_0>mean(mf_net_amount_xl_0, 30)
cond4= (close_0-close_1)/close_1 >0.05
(close_0-close_30)/close_30>1.25
(close_0-close_5)/close_5>1.16
list_days_0>365
ta_bbands_middleband_28_0 
cond28=sum(price_limit_status_0==3,80)>5
; total time=   0.0s
[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:    0.0s remaining:    0.0s
[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:    0.0s finished
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-38-43135d00a15a> in <module>
    932     score = result.get('m40').read_raw_perf()['sharpe'].tail(1)[0]
    933     return {'score': score}
--> 934 m19 = M.hyper_parameter_search.v1(
    935     param_grid_builder=m19_param_grid_builder_bigquant_run,
    936     scoring=m19_scoring_bigquant_run,

AttributeError: 'int' object has no attribute 'startswith'