In [1]:
import tensorflow as tf

# Let TensorFlow allocate GPU memory on demand instead of reserving it all up front.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)
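
If more than one GPU is visible, the same flag can be set per device; note that memory growth must be configured before the GPUs are first initialized. A minimal sketch using only standard TensorFlow 2.x calls:

import tensorflow as tf

# Enable on-demand GPU memory allocation for every visible device, then verify the flag.
for gpu in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu, True)
    print(gpu.name, tf.config.experimental.get_memory_growth(gpu))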

    {"description":"实验创建于2022/9/6","graph":{"edges":[{"to_node_id":"-613:inputs","from_node_id":"-605:data"}],"nodes":[{"node_id":"-605","module_id":"BigQuantSpace.dl_layer_input.dl_layer_input-v1","parameters":[{"name":"shape","value":"5,98","type":"Literal","bound_global_parameter":null},{"name":"batch_shape","value":"","type":"Literal","bound_global_parameter":null},{"name":"dtype","value":"float32","type":"Literal","bound_global_parameter":null},{"name":"sparse","value":"False","type":"Literal","bound_global_parameter":null},{"name":"name","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"inputs","node_id":"-605"}],"output_ports":[{"name":"data","node_id":"-605"}],"cacheable":false,"seq_num":1,"comment":"","comment_collapsed":true},{"node_id":"-613","module_id":"BigQuantSpace.dl_layer_lstm.dl_layer_lstm-v1","parameters":[{"name":"units","value":"64","type":"Literal","bound_global_parameter":null},{"name":"activation","value":"tanh","type":"Literal","bound_global_parameter":null},{"name":"user_activation","value":"","type":"Literal","bound_global_parameter":null},{"name":"recurrent_activation","value":"sigmoid","type":"Literal","bound_global_parameter":null},{"name":"user_recurrent_activation","value":"","type":"Literal","bound_global_parameter":null},{"name":"use_bias","value":"True","type":"Literal","bound_global_parameter":null},{"name":"kernel_initializer","value":"glorot_uniform","type":"Literal","bound_global_parameter":null},{"name":"user_kernel_initializer","value":"","type":"Literal","bound_global_parameter":null},{"name":"recurrent_initializer","value":"Orthogonal","type":"Literal","bound_global_parameter":null},{"name":"user_recurrent_initializer","value":"","type":"Literal","bound_global_parameter":null},{"name":"bias_initializer","value":"Zeros","type":"Literal","bound_global_parameter":null},{"name":"user_bias_initializer","value":"","type":"Literal","bound_global_parameter":null},{"name":"unit_forget_bias","value":"True","type":"Literal","bound_global_parameter":null},{"name":"kernel_regularizer","value":"None","type":"Literal","bound_global_parameter":null},{"name":"kernel_regularizer_l1","value":0,"type":"Literal","bound_global_parameter":null},{"name":"kernel_regularizer_l2","value":0,"type":"Literal","bound_global_parameter":null},{"name":"user_kernel_regularizer","value":"","type":"Literal","bound_global_parameter":null},{"name":"recurrent_regularizer","value":"None","type":"Literal","bound_global_parameter":null},{"name":"recurrent_regularizer_l1","value":0,"type":"Literal","bound_global_parameter":null},{"name":"recurrent_regularizer_l2","value":0,"type":"Literal","bound_global_parameter":null},{"name":"user_recurrent_regularizer","value":"","type":"Literal","bound_global_parameter":null},{"name":"bias_regularizer","value":"None","type":"Literal","bound_global_parameter":null},{"name":"bias_regularizer_l1","value":0,"type":"Literal","bound_global_parameter":null},{"name":"bias_regularizer_l2","value":0,"type":"Literal","bound_global_parameter":null},{"name":"user_bias_regularizer","value":"","type":"Literal","bound_global_parameter":null},{"name":"activity_regularizer","value":"None","type":"Literal","bound_global_parameter":null},{"name":"activity_regularizer_l1","value":0,"type":"Literal","bound_global_parameter":null},{"name":"activity_regularizer_l2","value":0,"type":"Literal","bound_global_parameter":null},{"name":"user_activity_regularizer","value":"","type":"Literal","bound_global_parameter":null},{"name":"kernel_constraint",
"value":"None","type":"Literal","bound_global_parameter":null},{"name":"user_kernel_constraint","value":"","type":"Literal","bound_global_parameter":null},{"name":"recurrent_constraint","value":"None","type":"Literal","bound_global_parameter":null},{"name":"user_recurrent_constraint","value":"","type":"Literal","bound_global_parameter":null},{"name":"bias_constraint","value":"None","type":"Literal","bound_global_parameter":null},{"name":"user_bias_constraint","value":"","type":"Literal","bound_global_parameter":null},{"name":"dropout","value":0,"type":"Literal","bound_global_parameter":null},{"name":"recurrent_dropout","value":0,"type":"Literal","bound_global_parameter":null},{"name":"return_sequences","value":"False","type":"Literal","bound_global_parameter":null},{"name":"implementation","value":"2","type":"Literal","bound_global_parameter":null},{"name":"name","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"inputs","node_id":"-613"}],"output_ports":[{"name":"data","node_id":"-613"}],"cacheable":false,"seq_num":2,"comment":"","comment_collapsed":true}],"node_layout":"<node_postions><node_position Node='-605' Position='305.661865234375,97.67628479003906,200,200'/><node_position Node='-613' Position='306,197,200,200'/></node_postions>"},"nodes_readonly":false,"studio_version":"v2"}
In [2]:
# This code was auto-generated by the visual strategy environment on 2022-09-08 09:59.
# This cell can only be edited in visual mode. You can also copy the code into a new code cell or strategy and modify it there.

m1 = M.dl_layer_input.v1(
    shape='5,98',
    batch_shape='',
    dtype='float32',
    sparse=False,
    name=''
)

m2 = M.dl_layer_lstm.v1(
    inputs=m1.data,
    units=64,
    activation='tanh',
    recurrent_activation='sigmoid',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    recurrent_initializer='Orthogonal',
    bias_initializer='Zeros',
    unit_forget_bias=True,
    kernel_regularizer='None',
    kernel_regularizer_l1=0,
    kernel_regularizer_l2=0,
    recurrent_regularizer='None',
    recurrent_regularizer_l1=0,
    recurrent_regularizer_l2=0,
    bias_regularizer='None',
    bias_regularizer_l1=0,
    bias_regularizer_l2=0,
    activity_regularizer='None',
    activity_regularizer_l1=0,
    activity_regularizer_l2=0,
    kernel_constraint='None',
    recurrent_constraint='None',
    bias_constraint='None',
    dropout=0,
    recurrent_dropout=0,
    return_sequences=False,
    implementation='2',
    name=''
)
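
For reference, the graph above corresponds to a plain tf.keras model: a (timesteps=5, features=98) input feeding a single 64-unit LSTM. The sketch below mirrors the module parameters with standard Keras arguments; it is an approximation, since the BigQuant wrapper modules may add behavior beyond the bare Keras layers.

import tensorflow as tf

# Equivalent Keras graph: Input(5, 98) -> LSTM(64) with return_sequences=False,
# so the output shape is (batch, 64).
inputs = tf.keras.Input(shape=(5, 98), dtype='float32')
outputs = tf.keras.layers.LSTM(
    units=64,
    activation='tanh',
    recurrent_activation='sigmoid',
    use_bias=True,
    kernel_initializer='glorot_uniform',
    recurrent_initializer='orthogonal',
    bias_initializer='zeros',
    unit_forget_bias=True,
    dropout=0.0,
    recurrent_dropout=0.0,
    return_sequences=False,
)(inputs)
model = tf.keras.Model(inputs, outputs)
model.summary()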