
This is the improved score-sorted real-time screener, 2023.9.18.

It intersects the real-time popularity list with the stocks from the three strongest sectors by sector strength, taking the top 15 ranked by main-force buying as the candidate pool.

Then buy the stocks that rank highest in popularity.

Stocks are not bucketed by popularity rank: ranking popularity with rank() turned out to work poorly, while sorting directly on the raw popularity index value works noticeably better.
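A minimal illustration of that point (the 人气指数 values below are made up): rank() keeps only the ordering, so a huge gap in the raw index looks the same as a tiny one, while sorting on the raw value preserves it.

import pandas as pd

pool = pd.DataFrame({
    '股票代码': ['300001', '600002', '000003'],
    '人气指数': [9800, 350, 9650],            # raw popularity index (illustrative numbers)
})
pool['人气rank'] = pool['人气指数'].rank(ascending=False)   # 1, 3, 2 -- the size of the gap is lost
pool = pool.sort_values('人气指数', ascending=False)        # sort on the raw value instead
print(pool)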

I still have not found a way to read a per-stock heat index here, something like 问财's 个股热度; 开盘啦 does not provide it.

Limit-up and premium "genes" are layered on as a measure of stock character (股性), sorted by the count of >5% premium days. In practice this feels similar to big-order flow, but I think it works better than big orders alone; there is also a review of the historical K-line data, so you can see what a stock's character is like and whether it tends to trap buyers.

It has some reference value.

To sum up, this real-time screening version is big orders + popularity + stock character; still missing are the five-line staff (五线谱) overlay and K-line pattern recognition.
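For reference, a rough sketch of how those three legs could be combined (the numbers are made up; 累计大单净额 comes from get_dde_hq below, 人气排名 from the GetHotPHB board, and 溢价5%次数 from the GetStockPanKou_Narrow premium/limit-up stats):

import pandas as pd

pool = pd.DataFrame({
    '股票代码':   ['300001', '600002', '000003'],
    '累计大单净额': [3.2e7, 0.5e7, 2.5e7],   # 大单 (big orders)
    '人气排名':    [5, 120, 18],             # 人气 (smaller rank = hotter)
    '溢价5%次数':  [14, 3, 9],               # 股性 / premium gene
})

# 大单: keep only names with meaningful net big-order inflow
# (same order of magnitude as the 8e6 filter used further down).
pool = pool[pool['累计大单净额'] > 8e6]

# 股性 first, then 人气: premium count descending, popularity rank ascending.
pool = pool.sort_values(['溢价5%次数', '人气排名'], ascending=[False, True])
print(pool)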

---- ST stocks are removed, and an omission table is kept for cross-checking; historical screening records are stored in a CSV and stamped with the time so nothing gets lost.
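A minimal sketch of that record-keeping idea (append_history and the 记录时间 column are hypothetical names; the cells near the end of this notebook do the same thing against record_stock_list.csv):

import datetime
import os
import pandas as pd

def append_history(picks: pd.DataFrame, path: str = "record_stock_list.csv") -> None:
    # Tag every row of today's picks with a timestamp and append it to the CSV,
    # so earlier intraday screens are never overwritten or lost.
    picks = picks.copy()
    picks['记录时间'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    picks.to_csv(path, mode="a", header=not os.path.exists(path), index=False)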

In [ ]:
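# Quick check: keep only level-2 monitor rows whose capital label contains 吃 (aggressive absorbing buys)
# or 砸 (aggressive dumping sells); data_levle2 is built in the next cell, so run that cell first.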
data_levle2[data_levle2['capital'].str.contains('吃|砸')]
In [ ]:
import numpy as np 
import requests
import time
import json
import pandas as pd
timestamp = int(time.time() * 1000)


headers= {
    "Accept": "*/*", 
    "Cookie":'v=AzKBM_yJg0Q6Gb94nm_QJ70Jh3IRwzZdaMcqgfwLXuXQj90t5FOGbThXep7P; hxmPid=sns_index_578803804.user.my.all.yes.show; IFUserCookieKey={"userid":"578803804","escapename":"mx_578803804"}; escapename=mx_578803804; ticket=c1607d10a99760a8b17b29c6384d7c93; u_name=mx_578803804; user=MDpteF81Nzg4MDM4MDQ6Ok5vbmU6NTAwOjU4ODgwMzgwNDo1LDEsNTs2LDEsNTs3LDExMTExMTExMTExMCw1OzgsMTExMTAxMTEwMDAwMTExMTEwMDEwMDEwMDEwMDAwMDAsNTszMywwMDAxMDAwMDAwMDAsNTszNiwxMDAxMTExMTAwMDAxMTAwMTAxMTExMTEsNTs0NiwwMDAwMTExMTEwMDAwMDExMTExMTExMTEsNTs1MSwxMTAwMDAwMDAwMDAwMDAwLDU7NTgsMDAwMDAwMDAwMDAwMDAwMDEsNTs3OCwxLDU7ODcsMDAwMDAwMDAwMDAwMDAwMDAwMDEwMDAwLDU7NDQsMTEsNDA7MSwxMDEsNDA7MiwxLDQwOzMsMSw0MDsxMDIsMSw0MDoxNjo6OjU3ODgwMzgwNDoxNjk1MTM0MjIxOjo6MTYxOTYxNjYwMDo1NTM3OTowOjFmYjMwNTk3YjU5NzhmNDI3ZDYxZjcxMTg3NGQyMjY0NDo6MQ%3D%3D; user_status=0; userid=578803804; utk=4a5aaf35f18c80270571f93e314ba42e',
    "User-Agent":"IHexin/11.30.81 (cn.com.10jqka.IHexin; build:11.30.82; iOS 17.0.0) Alamofire/5.4.0",
    "Accept-Language": "zh-Hans-CN;q=1.0, en-CN;q=0.9, zh-Hans;q=0.8", 
    "Accept-Encoding": "br;q=1.0, gzip;q=0.9, deflate;q=0.8",  
    "Connection":"keep-alive"
    }

data_levle2 = pd.DataFrame()
random_sleep = np.abs(np.random.normal(loc=0.6, scale=0.8, size=(1, 5)))[0]
for i  in range(5):
    url = 'https://dq.10jqka.com.cn/fuyao/b2cweb_l2/l2/v1/l2/dde/stock/monitor?_=%s&page_num={}&page_size=20&reqLockKey=reqMonitorApi'%timestamp
    response = requests.get(url.format(i,),headers=headers)
    rst = pd.DataFrame(json.loads(response.text)['data']['list'])
    #print(random_sleep[i])
    #print(rst)
    
    data_levle2 = pd.concat([data_levle2, rst], ignore_index=True)
    time.sleep(random_sleep[i])
data_levle2
#for i in range(0,5,1):
#     response = requests.get(url,headers=headers)
#     rst = pd.DataFrame(json.loads(response.text)['data']['list'])
#     rst
In [2]:
import datetime
import time
import requests
import numpy as np
import pandas as pd
from pyecharts.charts import *
from pyecharts import options as opts
import json 
def get_dde_hq(stock_id,src='kpl'):
    try:
        if src=='ths':
            headers= {
        "Accept": "*/*", 
        "Cookie":'v=AzKBM_yJg0Q6Gb94nm_QJ70Jh3IRwzZdaMcqgfwLXuXQj90t5FOGbThXep7P; hxmPid=sns_index_578803804.user.my.all.yes.show; IFUserCookieKey={"userid":"578803804","escapename":"mx_578803804"}; escapename=mx_578803804; ticket=c1607d10a99760a8b17b29c6384d7c93; u_name=mx_578803804; user=MDpteF81Nzg4MDM4MDQ6Ok5vbmU6NTAwOjU4ODgwMzgwNDo1LDEsNTs2LDEsNTs3LDExMTExMTExMTExMCw1OzgsMTExMTAxMTEwMDAwMTExMTEwMDEwMDEwMDEwMDAwMDAsNTszMywwMDAxMDAwMDAwMDAsNTszNiwxMDAxMTExMTAwMDAxMTAwMTAxMTExMTEsNTs0NiwwMDAwMTExMTEwMDAwMDExMTExMTExMTEsNTs1MSwxMTAwMDAwMDAwMDAwMDAwLDU7NTgsMDAwMDAwMDAwMDAwMDAwMDEsNTs3OCwxLDU7ODcsMDAwMDAwMDAwMDAwMDAwMDAwMDEwMDAwLDU7NDQsMTEsNDA7MSwxMDEsNDA7MiwxLDQwOzMsMSw0MDsxMDIsMSw0MDoxNjo6OjU3ODgwMzgwNDoxNjk1MTM0MjIxOjo6MTYxOTYxNjYwMDo1NTM3OTowOjFmYjMwNTk3YjU5NzhmNDI3ZDYxZjcxMTg3NGQyMjY0NDo6MQ%3D%3D; user_status=0; userid=578803804; utk=4a5aaf35f18c80270571f93e314ba42e',
        "User-Agent":"IHexin/11.30.81 (cn.com.10jqka.IHexin; build:11.30.82; iOS 17.0.0) Alamofire/5.4.0",
        "Accept-Language": "zh-Hans-CN;q=1.0, en-CN;q=0.9, zh-Hans;q=0.8", 
        "Accept-Encoding": "br;q=1.0, gzip;q=0.9, deflate;q=0.8",  
        "Connection":"keep-alive"
        }

            data_levle2 = pd.DataFrame()
            random_sleep = np.abs(np.random.normal(loc=0.6, scale=0.8, size=(1, 5)))[0]
            timestamp = str(int(time.time() * 1000))
            url ='https://vaserviece.10jqka.com.cn/Level2/index.php?op=mainMonitorDetail&_={}&stockcode={}&userid=578803804'.format(timestamp,stock_id)
            response = requests.get(url,headers=headers)
            rst = pd.DataFrame(json.loads(response.text)['list'])
            bs_map = {'1':1,'2':-1}
            rst['成交量'] = rst['volume'].apply(lambda x:int(x.replace('手','00')))
            rst = rst[['tradetype','成交量','money','avgprice','ctime']]
            rst.columns = ['买卖类型','成交量','成交金额','价格','时间']
        elif src=='kpl':
            #第一步先获取实时大单条数
            u='https://apphq.longhuvip.com/w1/api/index.php?Order=0&st=20&a=GetMainMonitor_w30&c=StockYiDongKanPan&PhoneOSNew=1&DeviceID=00000000-296c-20ad-0000-00003eb74e84&VerSion=5.7.0.12&Token=4e7fa8458a2add3f14a50ca79e863772&Index=0&Money=2&apiv=w31&StockID=%s&UserID=1973778&IsBS=0&'%stock_id
            rst = requests.get(u).text
            rst = json.loads(rst)
            #第二步,获取当前所有大单数据
            u='https://apphq.longhuvip.com/w1/api/index.php?Order=0&st=%s&a=GetMainMonitor_w30&c=StockYiDongKanPan&PhoneOSNew=1&DeviceID=00000000-296c-20ad-0000-00003eb74e84&VerSion=5.7.0.12&Token=4e7fa8458a2add3f14a50ca79e863772&Index=0&Money=2&apiv=w31&StockID=%s&UserID=1973778&IsBS=0&'%(rst['Total'],stock_id)
            rst = requests.get(u).text
            rst = json.loads(rst)
            # --------大单这里记得要跟下面的   longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['trend'])[:,:5]).head(2) 是9:31分 相统一 不然会出现未来数据
            rst = pd.DataFrame(rst['List'])
            bs_map = {'1':-1,'2':1,'3':1,'4':-1}
            rst.columns = ['买卖类型','时间戳','成交量','成交金额','价格','时间']
            
        rst['成交金额'] = rst['成交金额'].astype(int)
        rst['成交量'] = rst['成交量'].astype(int)
        rst['价格'] = rst['价格'].astype(float)
        rst['买卖类型'] = rst['买卖类型'].map(bs_map)
        rst['分钟大单净额'] = rst['成交金额']*rst['买卖类型']
        rst['成交量'] = rst['成交量']*rst['买卖类型']
        rst['分钟'] = rst['时间'].apply(lambda x:int(str(x)[-8:-3].replace(':','')))
        rst_buy = rst[rst['买卖类型']==1]
        rst_sell = rst[rst['买卖类型']==-1]

        rst_buyall= rst_buy.groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst_sellall = rst_sell.groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst = pd.merge(rst_buyall,rst_sellall,on='分钟',how='outer')
        rst = rst.fillna(0)
        rst.columns = ['时间','分钟大单买笔数','分钟大单买额','分钟大单买量','分钟大单卖笔数','分钟大单卖额','分钟大单卖量']
        rst['分钟净流笔数'] = rst['分钟大单买笔数']-rst['分钟大单卖笔数']
        rst['分钟净流金额'] = rst['分钟大单买额']-rst['分钟大单卖额']

        rst_buy300 = rst_buy[rst_buy['成交金额']>=3e6].groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst_sell300 = rst_sell[rst_sell['成交金额']>=3e6].groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst300 = pd.merge(rst_buy300,rst_sell300,on='分钟',how='outer')
        rst300 = rst300.fillna(0)
        rst300.columns = ['时间','分钟大单买笔数','分钟大单买额','分钟大单买量','分钟大单卖笔数','分钟大单卖额','分钟大单卖量']
        rst300['分钟净流金额'] = rst300['分钟大单买额']-rst300['分钟大单卖额']
        rst300 = rst300[['时间','分钟净流金额']]
        rst300.columns = ['时间','分钟300w净额']
        rst = pd.merge(rst,rst300,on='时间',how='outer')

        rst_buy1000 = rst_buy[rst_buy['成交金额']>=1e7].groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst_sell1000 = rst_sell[rst_sell['成交金额']>=1e7].groupby('分钟').agg({'买卖类型':'sum','成交金额':'sum','成交量':'sum'}).reset_index()
        rst1000 = pd.merge(rst_buy1000,rst_sell1000,on='分钟',how='outer')
        rst1000 = rst1000.fillna(0)
        rst1000.columns = ['时间','分钟大单买笔数','分钟大单买额','分钟大单买量','分钟大单卖笔数','分钟大单卖额','分钟大单卖量']
        rst1000['分钟净流金额'] = rst1000['分钟大单买额']-rst1000['分钟大单卖额']
        rst1000 = rst1000[['时间','分钟净流金额']]
        rst1000.columns = ['时间','分钟1000w净额']
        rst = pd.merge(rst,rst1000,on='时间',how='outer')
        rst = rst.fillna(0)
        
        df = rst.iloc[:,1:].cumsum()
        df.columns = ['累计大单买笔数','累计大单买额','累计大单买量','累计大单卖笔数','累计大单卖额','累计大单卖量','累计大单净笔数','累计大单净额','累计300w净额','累计1000w净额']
        df['股票代码'] = stock_id 
        df['时间'] = rst['时间']
        df = pd.merge(df,rst,on=['时间'])
        return df.tail(1)
    except:
        cols = ['累计大单买笔数','累计大单买额','累计大单买量','累计大单卖笔数','累计大单卖额','累计大单卖量','累计大单净笔数','累计大单净额','累计300w净额','累计1000w净额','股票代码','时间','分钟大单买笔数','分钟大单买额','分钟大单买量','分钟大单卖笔数','分钟大单卖额','分钟大单卖量','分钟净流笔数','分钟净流金额', '分钟300w净额','分钟1000w净额']
        df = pd.DataFrame(columns=cols)
        return df.tail(1)
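A quick usage sketch for get_dde_hq (the stock codes are only examples, and live network access to the 开盘啦 endpoint is assumed): it returns a single latest row of cumulative and per-minute big-order flows, and can be fanned out over a small pool with joblib exactly as the screening cell below does. Note the comment inside the kpl branch about keeping this big-order window aligned with the minute bars to avoid look-ahead.

# One stock: latest snapshot of cumulative big-order flows.
snapshot = get_dde_hq('300750', src='kpl')
print(snapshot[['股票代码', '时间', '累计大单净额', '累计300w净额', '累计1000w净额']])

# A small pool in parallel, mirroring the screening cell further down.
from joblib import Parallel, delayed
dde = pd.concat(Parallel(n_jobs=-1, backend='threading')(
    delayed(get_dde_hq)(code, 'kpl') for code in ['300750', '600519']))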

-- This is the version with the sector-strength filter added.

In [5]:
import datetime
from joblib import Parallel,delayed
date_end = datetime.datetime.now().strftime("%Y-%m-%d")
print(datetime.datetime.now())

longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?&Index=0&Order=1&PhoneOSNew=2&Token=4e7fa8458a2add3f14a50ca79e863772&Type=4&UserID=1973778&VerSion=5.7.0.12&a=KanPanNew&apiv=w31&c=YiDongKanPan&st=200'#.format(date)
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)

# 提取数据并转换为DataFrame
trend_data = longhu_ss_data['List'] # columns=['date', 'hu_gutong_input', 'shen_gutong_input', 'beixiang_cashflow']
longhu_ss_data = pd.DataFrame(trend_data,)
longhu_ss_data = longhu_ss_data.rename(columns={'stock_code': '股票代码', 'stock_name': '股票名称', 'MIcon': '融1/非融0','ZJJE': '主力净流入', 'jiage': '价格', 'zhangfu': '涨幅', 'index': '异动', 'plate': '板块',}, inplace=False)
longhu_ss_data = longhu_ss_data[['股票代码', '股票名称','涨幅', '融1/非融0', '主力净流入','异动', '板块',  '价格', ]]    
#longhu_ss_data=longhu_ss_data[(longhu_ss_data['异动']>=170)&(longhu_ss_data['异动']<=255)]  #index是异动
longhu_ss_data=longhu_ss_data.sort_values(by=['主力净流入','异动',], ascending=[False,False,])#.head(30)
stock_list_yidong=longhu_ss_data['股票代码'].tolist()

#---今天的日期
sorted_dates= date_end #today_date
#这记录要画图的数据
huatu_data_dict= {}
dates = sorted_dates #pd.date_range(start=sentence_day_1, end=sentence_day_2, freq='D')
data = pd.DataFrame()
# 自定义函数来处理股票代码一列的字符串  这里修改位bigquant正确的格式
def add_suffix(code):
    if code.startswith('0') or code.startswith('3'):
        return code + '.SZA'
    elif code.startswith('6'):
        return code + '.SHA'

    elif code.startswith('8'):
        return code + '.BJA'
    else:
        return code
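# e.g. (illustrative inputs): add_suffix('300750') -> '300750.SZA', add_suffix('600519') -> '600519.SHA',
# add_suffix('830946') -> '830946.BJA'; any other prefix is returned unchanged.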
# 显示所有列
pd.set_option('display.max_columns', None)
#---这里是循环遍历功能 用来采集数据

longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?Order=1&a=RealRankingInfo&st=60&apiv=w26&Type=1&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923&Index=0&ZSType=7'#.format(date)

response = requests.get(longhu_ss_api)
#--检查一下 爬取数据的 返回的状态时候为空
if response.status_code == 200:
    try:
        longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
    except:
        print('数据为空1')
    # 判断字典是否为空
    if not longhu_ss_data['list']:
        # 字典为空,跳过代码块
        print(date,"数据为空2")

#     longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:3])
#     longhu_ss_cols = ['板块代码','板块名称','板块强度',]
#     longhu_ss_data

#     longhu_ss_data.columns = longhu_ss_cols

#     longhu_ss_data['板块强度'] = longhu_ss_data['板块强度'].astype(float)
#     longhu_ss_data = longhu_ss_data.sort_values(by='板块强度', ascending=False).head(10)
    

    longhu_ss_data_1=longhu_ss_data
    longhu_ss_data_1 = pd.DataFrame(np.array(longhu_ss_data_1['list'])[:,:7])
    longhu_ss_cols_1 = ['板块代码','板块名称','板块强度','板块涨幅','板块涨速','区间成交','主力买入']
    longhu_ss_data_1.columns = longhu_ss_cols_1

    longhu_ss_data_1['板块强度'] = longhu_ss_data_1['板块强度'].astype(float)
    longhu_ss_data_1['板块涨幅'] = longhu_ss_data_1['板块涨幅'].astype(float)
    longhu_ss_data_1['区间成交'] = longhu_ss_data_1['区间成交'].astype(float)
    longhu_ss_data_1['主力买入'] = longhu_ss_data_1['主力买入'].astype(float)
    
    longhu_ss_data_1['主力占比']=longhu_ss_data_1['主力买入']/longhu_ss_data_1['区间成交']
    longhu_ss_data_1['板块涨速'] = longhu_ss_data_1['板块涨速'].astype(float)
    
    
    
    df=longhu_ss_data_1
#     df=df[(df['区间成交']<=5*10000000000)&(df['区间成交']>=9.15*1000000)&(df['板块强度']>=-50)&(df['板块强度']<=5406)&(df['主力占比']<=16)&(df['主力占比']>=-13)&(df['主力买入']>=-6*100000000)&(df['主力买入']<=3.8*100000000)]  #这个可以有

    #df=df[(df['区间成交']<=4.99*10000000000)&(df['区间成交']>=9.15*1000000)&(df['主力占比']<=7.75)&(df['主力占比']>=-12.1)&(df['主力买入']>=-6.2*100000000)&(df['主力买入']<=2.72*100000000)] 

#     df=df[(df['区间成交']<=5*10000000000)&(df['区间成交']>=9.15*1000000)&(df['板块强度']>=150)&(df['板块强度']<=650)&(df['主力占比']<=16)&(df['主力占比']>=-13)&(df['主力买入']>=-6*100000000)&(df['主力买入']<=3.8*100000000)] #&(df['板块强度']>=-37)&(df['板块强度']<=2882)&
    longhu_ss_data_1=df
    #longhu_ss_data_1 = longhu_ss_data_1[(longhu_ss_data_1['板块涨速']>=-1)&(longhu_ss_data_1['板块涨速']<=5)] #&(longhu_ss_data_1['板块强度']<=1150)&(longhu_ss_data_1['板块强度']>=50)
    longhu_ss_data_1 = longhu_ss_data_1.sort_values(by='板块强度', ascending=False).head(10)

    bankuai_list=longhu_ss_data_1['板块代码'].tolist()
    
    del_blk = ['801314','801071','801027']
    bankuai_list = [blk for blk in bankuai_list if blk not in del_blk][:5]
    print( longhu_ss_data_1[['板块名称','板块强度']])
    #bankuai_list
    today_stock_list =pd.DataFrame()  # 使用df来存储唯一的股票代码
    checked_stocks = []
    #---这里是板块代码 循环抽取
    print(bankuai_list)
    for i in bankuai_list:
        longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?st=30&Index=0&old=1&IsKZZType=0&Order=1&PhoneOSNew=1&PlateID={}&Token=4e7fa8458a2add3f14a50ca79e863772&a=ZhiShuStockList_W8&UserID=1973778&apiv=w31&Type=6&c=ZhiShuRanking&VerSion=5.7.0.12&DeviceID=00000000-296c-20ad-0000-00003eb74e84&IsZZ=0&'.format(i)
        longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
        longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:25])#   51   20   20是区间涨幅  23连扳数 24 龙头标记  #11是主力买 12 是主力卖  13 
        longhu_ss_data = longhu_ss_data.rename(columns={0: '股票代码', 1: '股票名称', 2: '类型', 4: '概念', 5: '价格', 6: '涨幅', 9: '涨速', 19: '净流入占比',20:'区间涨幅',23:'连扳空间',24:'龙头标记',11:'主力买',13:'区间净额',}, inplace=False)
        longhu_ss_data = longhu_ss_data[['股票代码', '股票名称','涨幅', '涨速', '净流入占比','类型', '概念',  '价格', '区间涨幅','连扳空间','龙头标记','主力买',]]            

        #longhu_ss_data = longhu_ss_data[~longhu_ss_data['龙头标记'].str.contains('破板')]
        longhu_ss_data = longhu_ss_data[~longhu_ss_data['股票名称'].str.contains('ST')]
        #longhu_ss_data = longhu_ss_data[longhu_ss_data['龙头标记'].str.contains('龙一|龙二|龙三|龙四|龙五')]            

        #--只取板块强度前3的 龙 123   用涨速或者涨幅排序
        longhu_ss_data=longhu_ss_data.sort_values(by='主力买',ascending=False)#.head(20)
        longhu_ss_data=longhu_ss_data#.head(20)

        bankuai_stock_list=longhu_ss_data['股票代码'].tolist()
        #---这里循环把 强势板块中的 强势股票的  大单爬取出来 并且合并
        print(bankuai_stock_list)
        #---合并大单的数据和当天  龙头股票的数据    
        # 使用apply方法调用自定义函数处理股票代码一列的字符串
        longhu_ss_data['instrument']=longhu_ss_data['股票代码']     
        longhu_ss_data['instrument'] = longhu_ss_data['股票代码'].apply(add_suffix)
        intersected_df = longhu_ss_data

        today_stock_list = pd.concat([today_stock_list, intersected_df], ignore_index=True)   
        today_stock_list = today_stock_list.drop_duplicates(subset=['股票代码']) 
        #time.sleep(0.3)  # 停顿2秒钟 
    dde_results = Parallel(n_jobs=-1, backend='threading')(delayed(get_dde_hq)(stock,'kpl') for stock in list(set(stock_list_yidong)&set(today_stock_list['股票代码'])))
    data_2 = pd.concat(dde_results)
    today_stock_list=pd.merge(today_stock_list ,data_2,on=['股票代码'],)
else:
    print('数据有错误3')

#---  特征1:'净流入占比'   特征2:'净流笔数' 特征3:'净流入金额'  #  '涨速', '净流笔数',
#today_stock_list['净流入占比']=today_stock_list['净流入占比'].rank(pct=True)*100
#today_stock_list['净流金额']=today_stock_list['净流金额'].rank(pct=True)*100
#today_stock_list['净流笔数']=today_stock_list['净流笔数'].rank(pct=True)*100
data=today_stock_list[['股票代码','股票名称','涨幅','净流入占比','主力买','累计大单净额','累计300w净额','累计1000w净额','连扳空间','龙头标记','概念','累计大单买笔数', '累计大单净笔数']]

#---这里人气榜  实时  对 所选板块强度股票跟  人气榜取交集
longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?Order=1&a=GetHotPHB&st=200&apiv=w29&Type=1&c=StockBidYiDong&PhoneOSNew=1&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923'#.format(i)
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['List'])[:,:7])#20   20是区间涨幅  23连扳数 24 龙头标记  #11是主力买 12 是主力卖  13 是区间净额 26是换手 21 是量比  50 300w大单净额
longhu_ss_data = longhu_ss_data.rename(columns={0: '股票代码', 1: '股票名称', 2: '涨幅', 3: '排名变化', 4: '人气排名', 5: '人气激增', 6: '持续上榜',}, inplace=False)       


data= data.merge(longhu_ss_data ,on=['股票代码','股票名称',],how='left')

#print(data)
#---这里找到交集股票池子
stock_list_yidong=data['股票代码'].tolist()
duokongbi_list = pd.DataFrame( )

for i in stock_list_yidong:
    try:
        url = "https://apphq.longhuvip.com/w1/api/index.php?PhoneOSNew=2&StockID={}&Time=&VerSion=5.7.0.12&a=GetStockBsvolumeIncremental&apiv=w31&c=StockL2Data".format(i)
        rst=pd.DataFrame(json.loads(requests.get(url).text)['bsentrust'])#.head(5)
        rst.columns =['时间','总买量','总卖量','总买额','总卖额']
        rst['股票代码']=i
#         rst['总买额']=rst['总买额'].astype(float)
#         rst['总卖额']=rst['总卖额'].astype(float)
#         rst['委买差']=(rst['总买额']-rst['总卖额'])#/rst['总卖额']
#         rst['多空比']=rst['委买差'].cumsum()


        rst['总买额']=rst['总买额'].astype(float)
        rst['总卖额']=rst['总卖额'].astype(float)

        rst['总买量']=rst['总买量'].astype(float)
        rst['总卖量']=rst['总卖量'].astype(float)    


        rst['委买差']=rst['总买额']-rst['总卖额']


        rst['委买差额']=(rst['总买额']-rst['总卖额'])/rst['总卖额']
        rst['委买差量']=(rst['总买量']-rst['总卖量'])/rst['总卖量']
        rst['多空额比']=rst['委买差额'].cumsum()
        rst['多空量比']=rst['委买差量'].cumsum()

        # 创建加速度变量
        acceleration_1 = np.gradient(rst['委买差额'])  # 使用numpy库的gradient函数来计算加速度
        acceleration_2 = np.gradient(rst['委买差量']) 
        # 将加速度添加到数据帧中
        rst['加速度_1'] = acceleration_1 
        rst['加速度_2'] = acceleration_2

        #合并大单数据
        #df_dadan=get_dde_hq(i,src='kpl')
        #now_mintune = int(rst['时间'].iloc[-1].replace(':',''))
        #df_dadan_tail = df_dadan[df_dadan['时间']<=now_mintune].tail(1)
        #rst = pd.merge(rst,df_dadan_tail,on='股票代码',how='left')
        duokongbi_list = pd.concat([duokongbi_list, rst.tail(1)], ignore_index=True)
    except:
        pass

data= data.merge(duokongbi_list ,on=['股票代码',],how='left')
#data=data[(data['累计300w净额']>0)&(data['累计大单净笔数']>-39)&(data['累计1000w净额']>=0)&(data['委买差']>=-5.381851e+08)]     #(data['多空比']>0)&
data=data[(data['累计大单净额']>8*1000000)] #&(data['累计1000w净额']>8*1000000)&(data['累计1000w净额']<=8*1000000)
#stock_list_total=stock_list_total[(stock_list_total["委买差"]>0)&(stock_list_total["红绿差"]>=-1)&(stock_list_total["累计主力净流入"]>=8*1000000  )]
print(datetime.datetime.now())
data=data.sort_values(by=['加速度_1','加速度_2',], ascending=[False,False,]).head(10)  # '累计大单买笔数', #累计大单净笔数
#data=data.sort_values(by=['委买差','主力买',], ascending=[False,False,])#.head(20)  # '累计大单买笔数', #累计大单净笔数
data=data[['股票代码','股票名称','涨幅_x','委买差额','多空量比','加速度_1','加速度_2','委买差','净流入占比','累计大单净额','累计300w净额','累计1000w净额','连扳空间','龙头标记','概念','主力买','时间','人气排名','累计大单买笔数', '累计大单净笔数']] # '溢价5%次数', '涨停次数','次日红盘率','连扳率',
#record_stock_list =pd.concat([record_stock_list , data], axis=0)
#--原来的表  ---跟现在的合并  显示 时间  这样如果有 一开始早盘显示 午盘消失,后面不见的 也可以显示在表中。
data
2023-11-09 00:13:51.373916
    板块名称    板块强度
0   文化传媒  6438.0
1   华为概念  6378.0
2     芯片  5736.0
3  汽车零部件  3476.0
4   人工智能  2854.0
5     医药  2760.0
6     游戏  2472.0
7   消费电子  1928.0
8   数字经济  1353.0
9    地产链  1222.0
['801031', '801218', '801001', '801199', '801085']
['605577', '603533', '300459', '002657', '300063', '603630', '002291', '300133', '002712', '002137', '601928', '002995', '002354', '002517', '603729', '002238', '300571', '300364', '301262', '301052', '601595', '300426', '000892', '300654', '000802', '601599', '300528', '601900', '002181', '300770']
['688258', '300047', '603533', '300494', '300459', '002657', '300063', '300507', '002855', '603158', '000829', '688327', '000925', '300052', '688229', '600071', '300608', '600839', '600960', '603037', '600733', '300364', '300277', '301489', '300735', '002222', '000034', '688383', '301236', '300826']
['688258', '603388', '000056', '002046', '301297', '688102', '688361', '600520', '600641', '300042', '603991', '000925', '603306', '600071', '688072', '002137', '300031', '300554', '002584', '300287', '688037', '300260', '300567', '002222', '301236', '300045', '603726', '688256', '600246', '300429']
['688123', '605258', '300680', '688533', '603586', '300731', '603297', '300507', '688326', '603158', '605555', '605255', '603178', '603306', '300322', '300572', '600960', '002988', '603037', '600733', '605218', '301489', '603390', '300735', '002931', '002222', '300177', '300684', '301488', '603768']
['688095', '300315', '688258', '300047', '603357', '603533', '688102', '002657', '300063', '603203', '000829', '688327', '301208', '000925', '603003', '002712', '300608', '002995', '600839', '002238', '300364', '301262', '301052', '300277', '603390', '000892', '301236', '300045', '688256']
2023-11-09 00:14:25.818966
Out[5]:
股票代码 股票名称 涨幅_x 委买差额 多空量比 加速度_1 加速度_2 委买差 净流入占比 累计大单净额 累计300w净额 累计1000w净额 连扳空间 龙头标记 概念 主力买 时间 人气排名 累计大单买笔数 累计大单净笔数
41 301297 富乐德 20.01 0.747517 -0.103436 1.017780 1.084915 12931061.0 12.42 336136156.0 242564157.0 0.0 首板 龙四 光刻机、芯片 668749315 09:34 NaN 160.0 237.0
32 603037 凯众股份 10.01 -0.327343 -1.416412 0.221366 0.246047 -11333830.0 0.64 48729277.0 61123350.0 92876869.0 3连板 龙二 华为汽车、华为概念 237807829 09:34 115 31.0 68.0
15 301052 果麦文化 12.27 -0.343879 0.459686 0.218628 0.232840 -6409752.0 2.48 53180101.0 39095329.0 11492392.0 龙八 文化传媒、人工智能 219412629 09:34 NaN 47.0 76.0
10 002354 天娱数科 5.05 -0.086337 -1.013432 0.168058 0.178666 -4037831.0 0.36 41455136.0 19745412.0 0.0 虚拟人、短剧 303079194 09:34 80 73.0 134.0
26 603158 腾龙股份 10.02 -0.045733 0.899553 0.130846 0.139993 -4033078.0 3.05 160785762.0 121184783.0 89255999.0 3天2板 龙三 华为汽车、新能源汽车 503001491 09:34 12 87.0 141.0
6 002712 思美传媒 9.86 -0.367227 -2.399728 0.123352 0.132754 -8756173.0 2.7 39921460.0 10056485.0 0.0 昨日首板 破板 文化传媒、互动真人 352773949 09:34 NaN 73.0 123.0
1 603533 掌阅科技 9.98 -0.061399 0.423347 0.120074 0.138464 -2860147.0 3.92 155496150.0 76248566.0 15112531.0 3天2板 龙四 短剧、网红经济 687178699 09:34 61 159.0 259.0
56 300315 掌趣科技 14.38 -0.360813 -1.683747 0.108291 0.109890 -24155210.0 1.16 101391314.0 0.0 0.0 游戏、AIGC 911415080 09:34 97 250.0 434.0
34 301489 思泉新材 12.93 -0.250843 -0.426038 0.090110 0.109394 -5235562.0 3.29 19750764.0 10534167.0 0.0 龙七 汽车零部件、华为概念 202284274 09:34 NaN 33.0 57.0
46 300045 华力创通 6.89 -0.563132 -2.775636 0.073807 0.080740 -109560323.0 0.53 157215677.0 85286142.0 0.0 卫星导航、人工智能 1353702673 09:34 43 272.0 500.0
In [ ]:
import datetime
from joblib import Parallel,delayed
today_date=['2023-10-27']#datetime.datetime.now().strftime("%Y-%m-%d ")
print(today_date)
print(datetime.datetime.now())

longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?&Index=0&Order=1&PhoneOSNew=2&Token=4e7fa8458a2add3f14a50ca79e863772&Type=4&UserID=1973778&VerSion=5.7.0.12&a=KanPanNew&apiv=w31&c=YiDongKanPan&st=200'#.format(date)
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)

# 提取数据并转换为DataFrame
trend_data = longhu_ss_data['List'] # columns=['date', 'hu_gutong_input', 'shen_gutong_input', 'beixiang_cashflow']
longhu_ss_data = pd.DataFrame(trend_data,)
longhu_ss_data = longhu_ss_data.rename(columns={'stock_code': '股票代码', 'stock_name': '股票名称', 'MIcon': '融1/非融0','ZJJE': '主力净流入', 'jiage': '价格', 'zhangfu': '涨幅', 'index': '异动', 'plate': '板块',}, inplace=False)
longhu_ss_data = longhu_ss_data[['股票代码', '股票名称','涨幅', '融1/非融0', '主力净流入','异动', '板块',  '价格', ]]    
#longhu_ss_data=longhu_ss_data[(longhu_ss_data['异动']>=170)&(longhu_ss_data['异动']<=255)]  #index是异动
longhu_ss_data=longhu_ss_data.sort_values(by=['主力净流入','异动',], ascending=[False,False,])#.head(30)
stock_list_yidong=longhu_ss_data['股票代码'].tolist()

#---今天的日期
sorted_dates=today_date
#这记录要画图的数据
huatu_data_dict= {}
dates = sorted_dates #pd.date_range(start=sentence_day_1, end=sentence_day_2, freq='D')
data = pd.DataFrame()
# 自定义函数来处理股票代码一列的字符串  这里修改位bigquant正确的格式
def add_suffix(code):
    if code.startswith('0') or code.startswith('3'):
        return code + '.SZA'
    elif code.startswith('6'):
        return code + '.SHA'

    elif code.startswith('8'):
        return code + '.BJA'
    else:
        return code
# 显示所有列
pd.set_option('display.max_columns', None)
#---这里是循环遍历功能 用来采集数据
for date in dates:

    #date=date.strftime('%Y-%m-%d')
    print(date)
    longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?Order=1&a=RealRankingInfo&st=60&apiv=w26&Type=1&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923&Index=0&ZSType=7'#.format(date)
    
    response = requests.get(longhu_ss_api)
    #--检查一下 爬取数据的 返回的状态时候为空
    if response.status_code == 200:
        try:
            longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
        except:
            print('数据为空1')
            continue
        #print(longhu_ss_data)
        # 判断字典是否为空
        if not longhu_ss_data['list']:
            # 字典为空,跳过代码块
            print(date,"数据为空2")
            continue
        
        longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:3])
        longhu_ss_cols = ['板块代码','板块名称','板块强度',]
        longhu_ss_data

        longhu_ss_data.columns = longhu_ss_cols

        longhu_ss_data['板块强度'] = longhu_ss_data['板块强度'].astype(float)
        longhu_ss_data = longhu_ss_data.sort_values(by='板块强度', ascending=False).head(4)
        bankuai_list=longhu_ss_data['板块代码'].tolist()
        print( longhu_ss_data[['板块名称','板块强度']])
        #bankuai_list
        today_stock_list =pd.DataFrame()  # 使用df来存储唯一的股票代码
        checked_stocks = []
        #---这里是板块代码 循环抽取
        print(bankuai_list)
        for i in bankuai_list:
            longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?st=30&Index=0&old=1&IsKZZType=0&Order=1&PhoneOSNew=1&PlateID={}&Token=4e7fa8458a2add3f14a50ca79e863772&a=ZhiShuStockList_W8&UserID=1973778&apiv=w31&Type=6&c=ZhiShuRanking&VerSion=5.7.0.12&DeviceID=00000000-296c-20ad-0000-00003eb74e84&IsZZ=0&'.format(i)
            longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
            longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:51])#20   20是区间涨幅  23连扳数 24 龙头标记  #11是主力买 12 是主力卖  13 
            longhu_ss_data = longhu_ss_data.rename(columns={0: '股票代码', 1: '股票名称', 2: '类型', 4: '概念', 5: '价格', 6: '涨幅', 9: '涨速', 19: '净流入占比',20:'区间涨幅',23:'连扳空间',24:'龙头标记',11:'主力买',13:'区间净额',50:'300w大单净额',25:'换手率',21:'量比',10:'实际流通盘'}, inplace=False)
            longhu_ss_data = longhu_ss_data[['股票代码', '股票名称','涨幅', '涨速', '净流入占比','类型', '概念',  '价格', '区间涨幅','连扳空间','龙头标记','主力买','区间净额','300w大单净额','换手率','量比','实际流通盘']]            
            
            #longhu_ss_data = longhu_ss_data[~longhu_ss_data['龙头标记'].str.contains('破板')]
            longhu_ss_data = longhu_ss_data[~longhu_ss_data['股票名称'].str.contains('ST')]
            #longhu_ss_data = longhu_ss_data[longhu_ss_data['龙头标记'].str.contains('龙一|龙二|龙三|龙四|龙五')]            
            
            #--只取板块强度前3的 龙 123   用涨速或者涨幅排序
            longhu_ss_data=longhu_ss_data.sort_values(by='主力买',ascending=False)#.head(10)
            longhu_ss_data=longhu_ss_data#.head(20)
            
            bankuai_stock_list=longhu_ss_data['股票代码'].tolist()
            #---这里循环把 强势板块中的 强势股票的  大单爬取出来 并且合并
            data_2 = pd.DataFrame()
            print(bankuai_stock_list)
            """
            for j in bankuai_stock_list:
                StockID=j
                if StockID in checked_stocks:continue
                try:
                    df_summary = get_dde_hq(StockID,src='kpl')
                    huatu_data_dict[StockID] = df_summary
                    data_2 = pd.concat([data_2, df_summary], ignore_index=True)
                    checked_stocks.append(StockID)
                except:
                    continue
            """ 
            #---合并大单的数据和当天  龙头股票的数据    
            # 使用apply方法调用自定义函数处理股票代码一列的字符串
            longhu_ss_data['instrument']=longhu_ss_data['股票代码']
            # The per-stock big-order loop above is commented out, so data_2 is
            # always empty here; don't skip the sector because of that.
            #if  len(data_2)<=0:
            #    continue
            #longhu_ss_data=pd.merge(longhu_ss_data ,data_2,on=['股票代码'])
            longhu_ss_data['instrument'] = longhu_ss_data['股票代码'].apply(add_suffix)
            intersected_df = longhu_ss_data
            
            today_stock_list = pd.concat([today_stock_list, intersected_df], ignore_index=True)   
            today_stock_list = today_stock_list.drop_duplicates(subset=['股票代码']) 
            #time.sleep(0.3)  # 停顿2秒钟 
        dde_results = Parallel(n_jobs=-1, backend='threading')(delayed(get_dde_hq)(stock) for stock in list(set(stock_list_yidong)&set(today_stock_list['股票代码'])))
        print(datetime.datetime.now())
        data_2 = pd.concat(dde_results)
        today_stock_list=pd.merge(today_stock_list ,data_2,on=['股票代码'])
    else:
        print('数据有错误3')
        continue

#---  特征1:'净流入占比'   特征2:'净流笔数' 特征3:'净流入金额'  #  '涨速', '净流笔数',
#today_stock_list['净流入占比']=today_stock_list['净流入占比'].rank(pct=True)*100
#today_stock_list['净流金额']=today_stock_list['净流金额'].rank(pct=True)*100
#today_stock_list['净流笔数']=today_stock_list['净流笔数'].rank(pct=True)*100
today_stock_list=today_stock_list[['股票代码','股票名称','涨幅','净流入占比','主力买','区间净额','累计300w净额','累计1000w净额','换手率','量比','区间涨幅','连扳空间','龙头标记','概念','实际流通盘']]
#获取昨日收盘价
from bigdatasource.api import DataSource
day_close = DataSource("bar1d_CN_STOCK_A").read(start_date=today_date[0], end_date=today_date[0])
day_close['close'] /= day_close['adjust_factor']
day_close['instrument'] = day_close['instrument'].apply(lambda x:x[:-4])
day_close = day_close[['instrument','close']]
day_close.columns = ['股票代码','昨日收盘价']
data = today_stock_list.merge(day_close,on='股票代码')


#---这里人气榜  实时  对 所选板块强度股票跟  人气榜取交集
longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?Order=1&a=GetHotPHB&st=200&apiv=w29&Type=1&c=StockBidYiDong&PhoneOSNew=1&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923'#.format(i)
            #url = longhu_ss_api.format(s_time, e_time, date)
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['List'])[:,:7])#20   20是区间涨幅  23连扳数 24 龙头标记  #11是主力买 12 是主力卖  13 是区间净额 26是换手 21 是量比  50 300w大单净额
longhu_ss_data = longhu_ss_data.rename(columns={0: '股票代码', 1: '股票名称', 2: '涨幅', 3: '排名变化', 4: '人气排名', 5: '人气激增', 6: '持续上榜',}, inplace=False)        
#longhu_ss_data 
data['实际流通盘']=data['实际流通盘'].astype(float)
data= data.merge(longhu_ss_data ,on=['股票代码','股票名称','涨幅'])
data['实际流通盘'] =(data['实际流通盘']  / 100000000).round(1).astype(str) + '亿'

#---这里创造一个列表用来合并数据
empty_df = pd.DataFrame(columns=['date', '股票代码', '股票名称', '涨停次数', '溢价5%次数', '次日红盘率', '首版封板率', '首日破板率', '连扳率'])
zhangting_jiyin = data['股票代码'].tolist()

for k in zhangting_jiyin:
    longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923&PhoneOSNew=1&State=1&StockID={}&Token=d89167ce77e16e62fd5fceaed32a5f72&Order&UserID=1973778&VerSion=5.7.0.12&a=GetStockPanKou_Narrow&apiv=w31&c=StockL2Data'.format(k)
    longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
    longhu_ss_data = pd.DataFrame([longhu_ss_data])
    longhu_ss_data = longhu_ss_data[['day', 'code', 'name', 'YJJY']]
    
    yjjy_columns = ['YJJY_{}'.format(i+1) for i in range(len(longhu_ss_data['YJJY'].values[0]))]
    yjjy_df = longhu_ss_data['YJJY'].apply(pd.Series)
    yjjy_df.columns = yjjy_columns
    
    longhu_ss_data = pd.concat([longhu_ss_data, yjjy_df], axis=1)
    longhu_ss_data = longhu_ss_data.drop('YJJY', axis=1)
    longhu_ss_data = longhu_ss_data.rename(columns={'code': '股票代码', 'name': '股票名称', 'day': 'date', 'YJJY_1': '涨停次数', 'YJJY_2': '溢价5%次数', 'YJJY_3': '次日红盘率', 'YJJY_4': '首版封板率', 'YJJY_5': '首日破板率', 'YJJY_6': '连扳率'}, inplace=False)
    
    # DataFrame.append was removed in recent pandas versions; use pd.concat instead
    empty_df = pd.concat([empty_df, longhu_ss_data], ignore_index=True)

data= data.merge(empty_df ,on=['股票代码','股票名称'])


#---这里找到交集股票池子
stock_list_yidong=data['股票代码'].tolist()
duokongbi_list = pd.DataFrame( )

for i in stock_list_yidong:
    try:
        url = "https://apphq.longhuvip.com/w1/api/index.php?PhoneOSNew=2&StockID={}&Time=&VerSion=5.7.0.12&a=GetStockBsvolumeIncremental&apiv=w31&c=StockL2Data".format(i)
        rst=pd.DataFrame(json.loads(requests.get(url).text)['bsentrust']).head(5)
        rst.columns =['时间','总买量','总卖量','总买额','总卖额']
        rst['股票代码']=i
        rst['总买额']=rst['总买额'].astype(float)
        rst['总卖额']=rst['总卖额'].astype(float)
        rst['委买差']=(rst['总买额']-rst['总卖额'])/rst['总卖额']
        rst['多空比']=rst['委买差'].cumsum()
        #合并大单数据
        df_dadan=get_dde_hq(i,src='ths')
        now_mintune = int(rst['时间'].iloc[-1].replace(':',''))
        df_dadan_tail = df_dadan[df_dadan['时间']<=now_mintune].tail()
        rst = pd.merge(rst,df_dadan_tail,on='股票代码',how='left')
        duokongbi_list = pd.concat([duokongbi_list, rst.tail(1)], ignore_index=True)
    except:
        pass

data= duokongbi_list.merge(data ,on=['股票代码',])

data=data[(data['多空比']>0)&(data['累计300w净额']>=0)&(data['主力净流入']>0)&(data['累计1000w净额']>=0)]
data=data[['股票代码','股票名称','涨幅','人气排名','溢价5%次数','净流入占比','累计300w净额','累计1000w净额','实际流通盘','排名变化','人气激增','持续上榜','涨停次数','次日红盘率','连扳率','连扳空间','龙头标记','概念','区间净额','换手率','区间涨幅','主力买','首日破板率','昨日收盘价','date','时间']]
#record_stock_list =pd.concat([record_stock_list , data], axis=0)
#--原来的表  ---跟现在的合并  显示 时间  这样如果有 一开始早盘显示 午盘消失,后面不见的 也可以显示在表中。
data
print(datetime.datetime.now())
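The commented-out concat above points at the idea in that note: keep an earlier snapshot around and merge it with the current picks, so names that showed up in the morning but dropped off at midday stay visible. A rough sketch under that assumption (merge_snapshots and the 时间戳 column are hypothetical names):

import datetime
import pandas as pd

def merge_snapshots(previous: pd.DataFrame, current: pd.DataFrame) -> pd.DataFrame:
    # Stamp the current picks, stack them under the earlier snapshot, and keep the
    # first (earliest) row per stock so the original appearance time is preserved.
    current = current.copy()
    current['时间戳'] = datetime.datetime.now().strftime("%H:%M")
    merged = pd.concat([previous, current], ignore_index=True)
    return merged.drop_duplicates(subset=['股票代码'], keep='first')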
In [ ]:
data
In [ ]:
record_stock_list=pd.DataFrame(columns=['股票代码','股票名称','涨幅','累计大单笔数','人气排名','溢价5%次数','净流入占比','实际流通盘','排名变化','人气激增','持续上榜','涨停次数','次日红盘率','连扳率','连扳空间','龙头标记','概念','区间净额','300w大单净额','换手率','量比','净流金额','区间涨幅','主力买','首日破板率','昨日收盘价','date'])
record_stock_list.to_csv("record_stock_list.csv")
record_stock_list
In [ ]:
list_start=pd.read_csv("record_stock_list.csv")


record_stock_list =pd.concat([data , list_start], axis=0)
list_start = list_start.drop_duplicates()
#record_stock_list

record_stock_list.to_csv("record_stock_list.csv",mode="a",header=False)

list_start.sort_values(by='溢价5%次数',ascending=False)#.head(10)
In [ ]:
#record_stock_list = pd.DataFrame(columns=['股票代码','股票名称','涨幅','累计大单笔数','人气排名','溢价5%次数','净流入占比','实际流通盘','排名变化','人气激增','持续上榜','涨停次数','次日红盘率','连扳率','连扳空间','龙头标记','概念','区间净额','300w大单净额','换手率','量比','净流金额','区间涨幅','主力买','首日破板率','昨日收盘价','date'])
record_stock_list.to_csv("record_stock_list.csv",)
##record_stock_list.to_csv("record_stock_list.csv",mode="a",header=False)
#kks=pd.read_csv("record_stock_list.csv")
#kks
In [ ]:
def show_dde_hq(stock_id):
    #下面是画图
    dde_data = huatu_data_dict[stock_id]
    dde_data['昨日收盘价'] = data[data['股票代码']==stock_id]['昨日收盘价'].values[0]
    dde_data['涨跌幅'] =(dde_data['价格']/dde_data['昨日收盘价']-1)*100
    time_series1 = pd.date_range(start='2022-01-01 09:30', end='2022-01-01 11:30', freq='1min')
    time_series2 = pd.date_range(start='2022-01-01 13:00', end='2022-01-01 15:00', freq='1min')
    k_x_data = ['09:25']+[m.strftime('%H:%M') for m in list(time_series1)+list(time_series2)]
    x_data = list(dde_data['分钟'])
    y_data = list(np.round(dde_data['涨跌幅'],2))
    z_data = list(dde_data['净流金额'])
    my_colors = ['red' if i>0 else 'green' for i in z_data]

    markpoints = [
        opts.MarkPointItem(coord=[x_data[i], y_data[i]],value=z_data[i],itemstyle_opts=opts.ItemStyleOpts(color=my_colors[i]))
        for i in range(len(y_data))
    ]
    line = Line(init_opts=opts.InitOpts(theme='light',
                                        width='800px',
                                        height='400px'))
    line.add_xaxis(k_x_data)
    line.add_yaxis('', y_data,markpoint_opts=opts.MarkPointOpts(data=markpoints,symbol_size=25,label_opts = opts.LabelOpts(is_show=False)),
    label_opts = opts.LabelOpts(is_show=False))
    return line
#--循环打印代码和图像
page = Page(layout=Page.SimplePageLayout)
for i in  range(len(data)):
    code = data['股票代码'].values[i]
    name = data['股票名称'].values[i]
    line = show_dde_hq(data['股票代码'].values[i])
    line.set_global_opts(title_opts=opts.TitleOpts(title=name+code))
    page.add(line)
    line.render_notebook()
page.render_notebook()
In [ ]:
def show_dde_hq(stock_id):
    #下面是画图
    dde_data = get_dde_hq(stock_id)
    dde_data['昨日收盘价'] = data[data['股票代码']==stock_id]['昨日收盘价'].values[0]
    dde_data['涨跌幅'] =(dde_data['价格']/dde_data['昨日收盘价']-1)*100
    time_series1 = pd.date_range(start='2022-01-01 09:30', end='2022-01-01 11:30', freq='1min')
    time_series2 = pd.date_range(start='2022-01-01 13:00', end='2022-01-01 15:00', freq='1min')
    k_x_data = ['09:25']+[m.strftime('%H:%M') for m in list(time_series1)+list(time_series2)]
    x_data = list(dde_data['分钟'])
    y_data = list(np.round(dde_data['涨跌幅'],2))
    z_data = list(dde_data['净流金额'])
    my_colors = ['red' if i>0 else 'green' for i in z_data]

    markpoints = [
        opts.MarkPointItem(coord=[x_data[i], y_data[i]],value=z_data[i],itemstyle_opts=opts.ItemStyleOpts(color=my_colors[i]))
        for i in range(len(y_data))
    ]
    line = Line(init_opts=opts.InitOpts(theme='light',
                                        width='1000px',
                                        height='600px'))
    line.add_xaxis(k_x_data)
    line.add_yaxis('', y_data,markpoint_opts=opts.MarkPointOpts(data=markpoints,symbol_size=25,label_opts = opts.LabelOpts(is_show=False)),
    label_opts = opts.LabelOpts(is_show=False))
    return line
#--循环打印代码和图像
#for i in  range(0,5,1):
print(data['股票代码'].values[2])
print(data['股票名称'].values[2])
line = show_dde_hq(data['股票代码'].values[2])
line.render_notebook()
    #show_dde_hq
In [ ]:
import requests
import pandas as pd
import time

import json
import numpy as np
 
# 发送HTTP请求并获取网页内容

date = '2023-09-11'
s_time = '0925'
e_time = '0930'
pd.set_option('display.max_columns', None)
longhu_ss_api = 'https://apphq.longhuvip.com/w1/api/index.php?Order=1&a=RealRankingInfo_W8&st=60&c=NewStockRanking&PhoneOSNew=1&RStart=0925&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923&VerSion=5.8.0.2&index=0&REnd=0930&apiv=w29&Type=1&FilterMotherboard=0&Filter=0&Ratio=6&FilterTIB=0&FilterGem=0&Date={}&index=0'.format(date)
url = longhu_ss_api.format(s_time, e_time, date)
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)


longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:20])
longhu_ss_cols = ['股票代码','股票名称','空行','排序','板块名称','价格','涨幅','未知1','未知2','涨速','未知3','未知4','未知5','未知6','未知7','未知8','未知9','未知10','未知11','未知12',]  # 20 placeholder names, deduplicated
longhu_ss_data.columns = longhu_ss_cols
longhu_ss_data
In [ ]:
longhu_ss_api ='https://apphis.longhuvip.com/w1/api/index.php?Order=1&a=HisRankingInfo_W8&st=60&c=HisStockRanking&PhoneOSNew=1&RStart=0925&DeviceID=20ad85ca-becb-3bed-b3d4-30032a0f5923&VerSion=5.8.0.2&index=0&REnd=0930&apiv=w29&Type=1&FilterMotherboard=0&Filter=0&Ratio=6&FilterTIB=0&FilterGem=0&Date=2023-09-07'
longhu_ss_data = json.loads(requests.get(longhu_ss_api).text)
longhu_ss_cols = ['股票代码','股票名称','未知1','未知2','板块','价格','当前涨幅','区间成交金额','实际换手率','未知3','实际流通金额','主力买金额',
 '主力卖金额','区间净额','买成占比','卖成占比','净成占比','买流占比','卖流占比','净流占比','区间涨幅','量比','未知4','涨停天数']
longhu_ss_data = pd.DataFrame(np.array(longhu_ss_data['list'])[:,:24])
longhu_ss_data.columns = longhu_ss_cols
longhu_ss_data=longhu_ss_data.sort_values(by='净成占比',ascending=False)#.head(10)
longhu_ss_data
#today_stock_list=today_stock_list.sort_values(by='累计大单笔数',ascending=False)#.head(10)
In [ ]:
longhu_ss_data['净成占比']=longhu_ss_data['净成占比'].astype(float)
longhu_ss_data['区间净额']=longhu_ss_data['区间净额'].astype(float)
longhu_ss_data['买流占比']=longhu_ss_data['买流占比'].astype(float)
longhu_ss_data=longhu_ss_data.sort_values(by='净成占比',ascending=False)#.head(10)
longhu_ss_data

    {"description":"实验创建于2017/8/26","graph":{"edges":[{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8:data"},{"to_node_id":"-215:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data1","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15:data"},{"to_node_id":"-215:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-222:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-231:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"-238:features","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60:model","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43:model"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-84:input_data","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data"},{"to_node_id":"-250:options_data","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60:predictions"},{"to_node_id":"-231:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62:data"},{"to_node_id":"-250:instruments","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43:training_ds","from_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-84:data"},{"to_node_id":"-1563:input_data","from_node_id":"-86:data"},{"to_node_id":"-222:input_data","from_node_id":"-215:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53:data2","from_node_id":"-222:data"},{"to_node_id":"-238:input_data","from_node_id":"-231:data"},{"to_node_id":"-86:input_data","from_node_id":"-238:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43:features","from_node_id":"-1558:data"},{"to_node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60:data","from_node_id":"-1563:data"}],"nodes":[{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8","module_id":"BigQuantSpace.instruments.instruments-v2","parameters":[{"name":"start_date","value":"2018-01-01","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"2020-12-31","type":"Literal","bound_global_parameter":null},{"name":"market","value":"CN_STOCK_A","type":"Literal","bound_global_parameter":null},{"name":"instrument_list","value":"","type":"Literal","bound_global_parameter":null},{"name":"max_count","value":"0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"rolling_conf","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-8"}],"cacheable":true,"seq_num":1,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15","module_id":"BigQuantSpace.advanced_auto_labeler.advanced_auto_labeler-v2","parameters":[{"name":"label_expr","value":"# #号开始的表示注释\n# 0. 每行一个,顺序执行,从第二个开始,可以使用label字段\n# 1. 可用数据字段见 https://bigquant.com/docs/develop/datasource/deprecated/history_data.html\n# 添加benchmark_前缀,可使用对应的benchmark数据\n# 2. 
可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/develop/bigexpr/usage.html>`_\n\n# 计算收益:5日收盘价(作为卖出价格)除以明日开盘价(作为买入价格)\nshift(close, -5) / shift(open, -1)\n\n# 极值处理:用1%和99%分位的值做clip\nclip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))\n\n# 将分数映射到分类,这里使用20个分类\nall_wbins(label, 20)\n\n# 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)\nwhere(shift(high, -1) == shift(low, -1), NaN, label)\n","type":"Literal","bound_global_parameter":null},{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"benchmark","value":"000300.HIX","type":"Literal","bound_global_parameter":null},{"name":"drop_na_label","value":"True","type":"Literal","bound_global_parameter":null},{"name":"cast_label_int","value":"True","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-15"}],"cacheable":true,"seq_num":2,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24","module_id":"BigQuantSpace.input_features.input_features-v1","parameters":[{"name":"features","value":"# #号开始的表示注释\n# 多个特征,每行一个,可以包含基础特征和衍生特征\nreturn_5\nreturn_10\nreturn_20\navg_amount_0/avg_amount_5\navg_amount_5/avg_amount_20\nrank_avg_amount_0/rank_avg_amount_5\nrank_avg_amount_5/rank_avg_amount_10\nrank_return_0\nrank_return_5\nrank_return_10\nrank_return_0/rank_return_5\nrank_return_5/rank_return_10\npe_ttm_0\n\nlist_days_0\n","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"features_ds","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-24"}],"cacheable":true,"seq_num":3,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43","module_id":"BigQuantSpace.stock_ranker_train.stock_ranker_train-v6","parameters":[{"name":"learning_algorithm","value":"排序","type":"Literal","bound_global_parameter":null},{"name":"number_of_leaves","value":30,"type":"Literal","bound_global_parameter":null},{"name":"minimum_docs_per_leaf","value":1000,"type":"Literal","bound_global_parameter":null},{"name":"number_of_trees","value":20,"type":"Literal","bound_global_parameter":null},{"name":"learning_rate","value":0.1,"type":"Literal","bound_global_parameter":null},{"name":"max_bins","value":1023,"type":"Literal","bound_global_parameter":null},{"name":"feature_fraction","value":1,"type":"Literal","bound_global_parameter":null},{"name":"data_row_fraction","value":1,"type":"Literal","bound_global_parameter":null},{"name":"plot_charts","value":"True","type":"Literal","bound_global_parameter":null},{"name":"ndcg_discount_base","value":1,"type":"Literal","bound_global_parameter":null},{"name":"m_lazy_run","value":"False","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"training_ds","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"},{"name":"features","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"},{"name":"test_ds","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"},{"name":"base_model","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"}],"output_ports":[{"name":"model","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"},{"name":"feature_gains","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"},{"name":"m_lazy_run","
node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-43"}],"cacheable":true,"seq_num":6,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53","module_id":"BigQuantSpace.join.join-v3","parameters":[{"name":"on","value":"date,instrument","type":"Literal","bound_global_parameter":null},{"name":"how","value":"inner","type":"Literal","bound_global_parameter":null},{"name":"sort","value":"False","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"data1","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"},{"name":"data2","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-53"}],"cacheable":true,"seq_num":7,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60","module_id":"BigQuantSpace.stock_ranker_predict.stock_ranker_predict-v5","parameters":[{"name":"m_lazy_run","value":"False","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"model","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60"},{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60"}],"output_ports":[{"name":"predictions","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60"},{"name":"m_lazy_run","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-60"}],"cacheable":true,"seq_num":8,"comment":"","comment_collapsed":true},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62","module_id":"BigQuantSpace.instruments.instruments-v2","parameters":[{"name":"start_date","value":"2021-01-01","type":"Literal","bound_global_parameter":"交易日期"},{"name":"end_date","value":"2021-12-31","type":"Literal","bound_global_parameter":"交易日期"},{"name":"market","value":"CN_STOCK_A","type":"Literal","bound_global_parameter":null},{"name":"instrument_list","value":"","type":"Literal","bound_global_parameter":null},{"name":"max_count","value":"0","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"rolling_conf","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-62"}],"cacheable":true,"seq_num":9,"comment":"预测数据,用于回测和模拟","comment_collapsed":false},{"node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-84","module_id":"BigQuantSpace.dropnan.dropnan-v1","parameters":[],"input_ports":[{"name":"input_data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-84"}],"output_ports":[{"name":"data","node_id":"287d2cb0-f53c-4101-bdf8-104b137c8601-84"}],"cacheable":true,"seq_num":13,"comment":"","comment_collapsed":true},{"node_id":"-86","module_id":"BigQuantSpace.dropnan.dropnan-v1","parameters":[],"input_ports":[{"name":"input_data","node_id":"-86"}],"output_ports":[{"name":"data","node_id":"-86"}],"cacheable":true,"seq_num":14,"comment":"","comment_collapsed":true},{"node_id":"-215","module_id":"BigQuantSpace.general_feature_extractor.general_feature_extractor-v7","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"before_start_days","value":90,"type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-215"},{"name":"features","node_id":"-215"}],"output_ports":[{"name":"data","node_id":"-215"}],"cacheable":true,"seq_num":15,"comment":"","comment_collapsed":true},{"node_id":"-222","module_id":"BigQuantSpace.derived_feature_extractor.derived_feature_extractor-v3","parameters":[{"name":"date_col","value":"date","type":"Liter
al","bound_global_parameter":null},{"name":"instrument_col","value":"instrument","type":"Literal","bound_global_parameter":null},{"name":"drop_na","value":"False","type":"Literal","bound_global_parameter":null},{"name":"remove_extra_columns","value":"False","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-222"},{"name":"features","node_id":"-222"}],"output_ports":[{"name":"data","node_id":"-222"}],"cacheable":true,"seq_num":16,"comment":"","comment_collapsed":true},{"node_id":"-231","module_id":"BigQuantSpace.general_feature_extractor.general_feature_extractor-v7","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"before_start_days","value":90,"type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-231"},{"name":"features","node_id":"-231"}],"output_ports":[{"name":"data","node_id":"-231"}],"cacheable":true,"seq_num":17,"comment":"","comment_collapsed":true},{"node_id":"-238","module_id":"BigQuantSpace.derived_feature_extractor.derived_feature_extractor-v3","parameters":[{"name":"date_col","value":"date","type":"Literal","bound_global_parameter":null},{"name":"instrument_col","value":"instrument","type":"Literal","bound_global_parameter":null},{"name":"drop_na","value":"False","type":"Literal","bound_global_parameter":null},{"name":"remove_extra_columns","value":"False","type":"Literal","bound_global_parameter":null},{"name":"user_functions","value":"","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-238"},{"name":"features","node_id":"-238"}],"output_ports":[{"name":"data","node_id":"-238"}],"cacheable":true,"seq_num":18,"comment":"","comment_collapsed":true},{"node_id":"-250","module_id":"BigQuantSpace.trade.trade-v4","parameters":[{"name":"start_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"end_date","value":"","type":"Literal","bound_global_parameter":null},{"name":"initialize","value":"# 回测引擎:初始化函数,只执行一次\ndef bigquant_run(context):\n import math\n from zipline.finance.commission import PerOrder\n\n # 加载预测数据\n context.ranker_prediction = context.options['data'].read_df()\n\n # 系统已经设置了默认的交易手续费和滑点,要修改手续费可使用如下函数\n context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))\n # 预测数据,通过options传入进来,使用 read_df 函数,加载到内存 (DataFrame)\n # 设置买入的股票数量,这里买入预测股票列表排名靠前的5只\n stock_count = 5\n # 每只的股票的权重,如下的权重分配会使得靠前的股票分配多一点的资金,[0.339160, 0.213986, 0.169580, ..]\n context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])\n # 设置每只股票占用的最大资金比例\n context.max_cash_per_instrument = 0.2\n context.options['hold_days'] = 5\n","type":"Literal","bound_global_parameter":null},{"name":"handle_data","value":"# 回测引擎:每日数据处理函数,每天执行一次\ndef bigquant_run(context, data):\n # 按日期过滤得到今日的预测数据\n ranker_prediction = context.ranker_prediction[\n context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]\n\n # 1. 
资金分配\n # 平均持仓时间是hold_days,每日都将买入股票,每日预期使用 1/hold_days 的资金\n # 实际操作中,会存在一定的买入误差,所以在前hold_days天,等量使用资金;之后,尽量使用剩余资金(这里设置最多用等量的1.5倍)\n is_staging = context.trading_day_index < context.options['hold_days'] # 是否在建仓期间(前 hold_days 天)\n cash_avg = context.portfolio.portfolio_value / context.options['hold_days']\n cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)\n cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)\n positions = {e.symbol: p.amount * p.last_sale_price\n for e, p in context.portfolio.positions.items()}\n\n # 2. 生成卖出订单:hold_days天之后才开始卖出;对持仓的股票,按机器学习算法预测的排序末位淘汰\n if not is_staging and cash_for_sell > 0:\n equities = {e.symbol: e for e, p in context.portfolio.positions.items()}\n instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(\n lambda x: x in equities)])))\n\n for instrument in instruments:\n context.order_target(context.symbol(instrument), 0)\n cash_for_sell -= positions[instrument]\n if cash_for_sell <= 0:\n break\n\n # 3. 生成买入订单:按机器学习算法预测的排序,买入前面的stock_count只股票\n buy_cash_weights = context.stock_weights\n buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])\n max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument\n for i, instrument in enumerate(buy_instruments):\n cash = cash_for_buy * buy_cash_weights[i]\n if cash > max_cash_per_instrument - positions.get(instrument, 0):\n # 确保股票持仓量不会超过每次股票最大的占用资金量\n cash = max_cash_per_instrument - positions.get(instrument, 0)\n if cash > 0:\n context.order_value(context.symbol(instrument), cash)\n","type":"Literal","bound_global_parameter":null},{"name":"prepare","value":"# 回测引擎:准备数据,只执行一次\ndef bigquant_run(context):\n pass\n","type":"Literal","bound_global_parameter":null},{"name":"before_trading_start","value":"","type":"Literal","bound_global_parameter":null},{"name":"volume_limit","value":0.025,"type":"Literal","bound_global_parameter":null},{"name":"order_price_field_buy","value":"open","type":"Literal","bound_global_parameter":null},{"name":"order_price_field_sell","value":"close","type":"Literal","bound_global_parameter":null},{"name":"capital_base","value":1000000,"type":"Literal","bound_global_parameter":null},{"name":"auto_cancel_non_tradable_orders","value":"True","type":"Literal","bound_global_parameter":null},{"name":"data_frequency","value":"daily","type":"Literal","bound_global_parameter":null},{"name":"price_type","value":"真实价格","type":"Literal","bound_global_parameter":null},{"name":"product_type","value":"股票","type":"Literal","bound_global_parameter":null},{"name":"plot_charts","value":"True","type":"Literal","bound_global_parameter":null},{"name":"backtest_only","value":"False","type":"Literal","bound_global_parameter":null},{"name":"benchmark","value":"000300.HIX","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"instruments","node_id":"-250"},{"name":"options_data","node_id":"-250"},{"name":"history_ds","node_id":"-250"},{"name":"benchmark_ds","node_id":"-250"},{"name":"trading_calendar","node_id":"-250"}],"output_ports":[{"name":"raw_perf","node_id":"-250"}],"cacheable":false,"seq_num":19,"comment":"","comment_collapsed":true},{"node_id":"-1558","module_id":"BigQuantSpace.input_features.input_features-v1","parameters":[{"name":"features","value":"# #号开始的表示注释\n# 
多个特征,每行一个,可以包含基础特征和衍生特征\nreturn_5\nreturn_10\nreturn_20\navg_amount_0/avg_amount_5\navg_amount_5/avg_amount_20\nrank_avg_amount_0/rank_avg_amount_5\nrank_avg_amount_5/rank_avg_amount_10\nrank_return_0\nrank_return_5\nrank_return_10\nrank_return_0/rank_return_5\nrank_return_5/rank_return_10\npe_ttm_0\n\n","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"features_ds","node_id":"-1558"}],"output_ports":[{"name":"data","node_id":"-1558"}],"cacheable":true,"seq_num":4,"comment":"","comment_collapsed":true},{"node_id":"-1563","module_id":"BigQuantSpace.filter.filter-v3","parameters":[{"name":"expr","value":"list_days_0>30","type":"Literal","bound_global_parameter":null},{"name":"output_left_data","value":"False","type":"Literal","bound_global_parameter":null}],"input_ports":[{"name":"input_data","node_id":"-1563"}],"output_ports":[{"name":"data","node_id":"-1563"},{"name":"left_data","node_id":"-1563"}],"cacheable":true,"seq_num":5,"comment":"","comment_collapsed":true}],"node_layout":"<node_postions><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-8' Position='211,64,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-15' Position='70,183,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-24' Position='756.7845458984375,-70.27043151855469,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-43' Position='638,561,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-53' Position='249,375,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-60' Position='639.2332763671875,701.1931762695312,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-62' Position='1015.8291015625,46.00000476837158,200,200'/><node_position Node='287d2cb0-f53c-4101-bdf8-104b137c8601-84' Position='376,467,200,200'/><node_position Node='-86' Position='1035.099609375,379.668701171875,200,200'/><node_position Node='-215' Position='381,188,200,200'/><node_position Node='-222' Position='385,280,200,200'/><node_position Node='-231' Position='1023.2376708984375,155.16046142578125,200,200'/><node_position Node='-238' Position='1031.453125,293.09954833984375,200,200'/><node_position Node='-250' Position='358.58099365234375,889.0281982421875,200,200'/><node_position Node='-1558' Position='700.6522827148438,220.71770477294922,200,200'/><node_position Node='-1563' Position='1035.635986328125,465.5260314941406,200,200'/></node_postions>"},"nodes_readonly":false,"studio_version":"v2"}
    In [ ]:
    # 本代码由可视化策略环境自动生成 2023年11月9日 00:16
    # 本代码单元只能在可视化模式下编辑。您也可以拷贝代码,粘贴到新建的代码单元或者策略,然后修改。
    
    
    # 回测引擎:初始化函数,只执行一次
    def m19_initialize_bigquant_run(context):
        import math
        from zipline.finance.commission import PerOrder
    
        # 加载预测数据:预测数据通过 options 传入,使用 read_df 函数加载到内存 (DataFrame)
        context.ranker_prediction = context.options['data'].read_df()

        # 系统已经设置了默认的交易手续费和滑点,要修改手续费可使用如下函数
        context.set_commission(PerOrder(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
        # 设置买入的股票数量,这里买入预测股票列表排名靠前的5只
        stock_count = 5
        # 每只股票的权重:如下的权重分配会使得排名靠前的股票分配到多一点的资金,约为 [0.339160, 0.213986, 0.169580, ...]
        context.stock_weights = T.norm([1 / math.log(i + 2) for i in range(0, stock_count)])
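        # 权重来源示意(根据上面注释给出的数值推算,假设 T.norm 将列表归一化为和为 1):
        #   原始值约为 1/ln(2)=1.4427, 1/ln(3)=0.9102, 1/ln(4)=0.7213, 1/ln(5)=0.6213, 1/ln(6)=0.5581,合计约 4.2536
        #   归一化后约为 [0.3392, 0.2140, 0.1696, 0.1461, 0.1312],与注释中的数值一致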
        # 设置每只股票占用的最大资金比例
        context.max_cash_per_instrument = 0.2
        context.options['hold_days'] = 5
    
    # 回测引擎:每日数据处理函数,每天执行一次
    def m19_handle_data_bigquant_run(context, data):
        # 按日期过滤得到今日的预测数据
        ranker_prediction = context.ranker_prediction[
            context.ranker_prediction.date == data.current_dt.strftime('%Y-%m-%d')]
    
        # 1. 资金分配
        # 平均持仓时间是hold_days,每日都将买入股票,每日预期使用 1/hold_days 的资金
        # 实际操作中,会存在一定的买入误差,所以在前hold_days天,等量使用资金;之后,尽量使用剩余资金(这里设置最多用等量的1.5倍)
        is_staging = context.trading_day_index < context.options['hold_days'] # 是否在建仓期间(前 hold_days 天)
        cash_avg = context.portfolio.portfolio_value / context.options['hold_days']
        cash_for_buy = min(context.portfolio.cash, (1 if is_staging else 1.5) * cash_avg)
        cash_for_sell = cash_avg - (context.portfolio.cash - cash_for_buy)
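        # 资金分配示意(假设数字,仅帮助理解上面三行):portfolio_value=1,000,000、hold_days=5 时 cash_avg=200,000;
        #   建仓期过后若账户现金为 150,000,则 cash_for_buy=min(150,000, 1.5*200,000)=150,000,
        #   cash_for_sell=200,000-(150,000-150,000)=200,000,即现金不足时需要通过卖出腾出约一天的资金量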
        positions = {e.symbol: p.amount * p.last_sale_price
                     for e, p in context.portfolio.positions.items()}
    
        # 2. 生成卖出订单:hold_days天之后才开始卖出;对持仓的股票,按机器学习算法预测的排序末位淘汰
        if not is_staging and cash_for_sell > 0:
            equities = {e.symbol: e for e, p in context.portfolio.positions.items()}
            instruments = list(reversed(list(ranker_prediction.instrument[ranker_prediction.instrument.apply(
                    lambda x: x in equities)])))
    
            for instrument in instruments:
                context.order_target(context.symbol(instrument), 0)
                cash_for_sell -= positions[instrument]
                if cash_for_sell <= 0:
                    break
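        # 说明:instruments 先筛出"当前持仓且出现在今日预测中"的股票,再整体反转,
        #   因此最先被卖出的是预测排名最靠后的持仓股("末位淘汰"),直到腾出 cash_for_sell 为止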
    
        # 3. 生成买入订单:按机器学习算法预测的排序,买入前面的stock_count只股票
        buy_cash_weights = context.stock_weights
        buy_instruments = list(ranker_prediction.instrument[:len(buy_cash_weights)])
        max_cash_per_instrument = context.portfolio.portfolio_value * context.max_cash_per_instrument
        for i, instrument in enumerate(buy_instruments):
            cash = cash_for_buy * buy_cash_weights[i]
            if cash > max_cash_per_instrument - positions.get(instrument, 0):
                # 确保单只股票的持仓金额不会超过为每只股票设置的最大占用资金量
                cash = max_cash_per_instrument - positions.get(instrument, 0)
            if cash > 0:
                context.order_value(context.symbol(instrument), cash)
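        # 单票资金上限示意(假设数字):portfolio_value=1,000,000、max_cash_per_instrument=0.2 时单票上限 200,000;
        #   若某票已有持仓市值 150,000、本次按权重分到 80,000,则会被压缩为 200,000-150,000=50,000 后再下单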
    
    # 回测引擎:准备数据,只执行一次
    def m19_prepare_bigquant_run(context):
        pass
    
    
    m1 = M.instruments.v2(
        start_date='2018-01-01',
        end_date='2020-12-31',
        market='CN_STOCK_A',
        instrument_list='',
        max_count=0
    )
    
    m2 = M.advanced_auto_labeler.v2(
        instruments=m1.data,
        label_expr="""# #号开始的表示注释
    # 0. 每行一个,顺序执行,从第二个开始,可以使用label字段
    # 1. 可用数据字段见 https://bigquant.com/docs/develop/datasource/deprecated/history_data.html
    #   添加benchmark_前缀,可使用对应的benchmark数据
    # 2. 可用操作符和函数见 `表达式引擎 <https://bigquant.com/docs/develop/bigexpr/usage.html>`_
    
    # 计算收益:5日后的收盘价(作为卖出价格)除以明日开盘价(作为买入价格)
    shift(close, -5) / shift(open, -1)
    
    # 极值处理:用1%和99%分位的值做clip
    clip(label, all_quantile(label, 0.01), all_quantile(label, 0.99))
    
    # 将分数映射到分类,这里使用20个分类
    all_wbins(label, 20)
    
    # 过滤掉一字涨停的情况 (设置label为NaN,在后续处理和训练中会忽略NaN的label)
    where(shift(high, -1) == shift(low, -1), NaN, label)
    """,
        start_date='',
        end_date='',
        benchmark='000300.HIX',
        drop_na_label=True,
        cast_label_int=True
    )
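    # 标注示意(假设数字,帮助理解上面的 label 表达式):若明日开盘价 10.00、5 日后收盘价 10.80,
    #   则 label = 10.80 / 10.00 = 1.08;随后用 1%/99% 分位数截断极值、映射到 20 个分类,
    #   并在"明日最高价 == 最低价"(一字板无法买入)时将 label 置为 NaN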
    
    m3 = M.input_features.v1(
        features="""# #号开始的表示注释
    # 多个特征,每行一个,可以包含基础特征和衍生特征
    return_5
    return_10
    return_20
    avg_amount_0/avg_amount_5
    avg_amount_5/avg_amount_20
    rank_avg_amount_0/rank_avg_amount_5
    rank_avg_amount_5/rank_avg_amount_10
    rank_return_0
    rank_return_5
    rank_return_10
    rank_return_0/rank_return_5
    rank_return_5/rank_return_10
    pe_ttm_0
    
    list_days_0
    """
    )
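    # 说明:形如 avg_amount_0/avg_amount_5 的条目是衍生特征(基础因子的比值),由下方的 derived_feature_extractor 计算;
    #   list_days_0(上市天数)在这里一并提取,主要供后面 m5 的过滤条件 list_days_0>30 使用,训练特征 m4 中并未包含它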
    
    m15 = M.general_feature_extractor.v7(
        instruments=m1.data,
        features=m3.data,
        start_date='',
        end_date='',
        before_start_days=90
    )
    
    m16 = M.derived_feature_extractor.v3(
        input_data=m15.data,
        features=m3.data,
        date_col='date',
        instrument_col='instrument',
        drop_na=False,
        remove_extra_columns=False
    )
    
    m7 = M.join.v3(
        data1=m2.data,
        data2=m16.data,
        on='date,instrument',
        how='inner',
        sort=False
    )
    
    m13 = M.dropnan.v1(
        input_data=m7.data
    )
    
    m9 = M.instruments.v2(
        start_date=T.live_run_param('trading_date', '2021-01-01'),
        end_date=T.live_run_param('trading_date', '2021-12-31'),
        market='CN_STOCK_A',
        instrument_list='',
        max_count=0
    )
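    # 说明:T.live_run_param('trading_date', 默认值) 在回测时一般返回这里给定的默认日期,
    #   在实盘/模拟交易时则会被替换成当日交易日期(具体以平台行为为准),因此 m9 同时服务于回测与每日预测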
    
    m17 = M.general_feature_extractor.v7(
        instruments=m9.data,
        features=m3.data,
        start_date='',
        end_date='',
        before_start_days=90
    )
    
    m18 = M.derived_feature_extractor.v3(
        input_data=m17.data,
        features=m3.data,
        date_col='date',
        instrument_col='instrument',
        drop_na=False,
        remove_extra_columns=False
    )
    
    m14 = M.dropnan.v1(
        input_data=m18.data
    )
    
    m5 = M.filter.v3(
        input_data=m14.data,
        expr='list_days_0>30',
        output_left_data=False
    )
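    # 说明:只保留上市天数大于 30 天的股票,避免把刚上市、历史数据不足的次新股纳入预测候选池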
    
    m4 = M.input_features.v1(
        features="""# #号开始的表示注释
    # 多个特征,每行一个,可以包含基础特征和衍生特征
    return_5
    return_10
    return_20
    avg_amount_0/avg_amount_5
    avg_amount_5/avg_amount_20
    rank_avg_amount_0/rank_avg_amount_5
    rank_avg_amount_5/rank_avg_amount_10
    rank_return_0
    rank_return_5
    rank_return_10
    rank_return_0/rank_return_5
    rank_return_5/rank_return_10
    pe_ttm_0
    
    """
    )
    
    m6 = M.stock_ranker_train.v6(
        training_ds=m13.data,
        features=m4.data,
        learning_algorithm='排序',
        number_of_leaves=30,
        minimum_docs_per_leaf=1000,
        number_of_trees=20,
        learning_rate=0.1,
        max_bins=1023,
        feature_fraction=1,
        data_row_fraction=1,
        plot_charts=True,
        ndcg_discount_base=1,
        m_lazy_run=False
    )
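    # 说明:StockRanker 以"排序"为目标训练树模型(20 棵树、每棵最多 30 个叶子、学习率 0.1),
    #   训练数据为 m13(历史特征 + 分箱后的 label);训练得到的模型在下方 m8 中对 m5 过滤后的股票逐日打分排序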
    
    m8 = M.stock_ranker_predict.v5(
        model=m6.model,
        data=m5.data,
        m_lazy_run=False
    )
    
    m19 = M.trade.v4(
        instruments=m9.data,
        options_data=m8.predictions,
        start_date='',
        end_date='',
        initialize=m19_initialize_bigquant_run,
        handle_data=m19_handle_data_bigquant_run,
        prepare=m19_prepare_bigquant_run,
        volume_limit=0.025,
        order_price_field_buy='open',
        order_price_field_sell='close',
        capital_base=1000000,
        auto_cancel_non_tradable_orders=True,
        data_frequency='daily',
        price_type='真实价格',
        product_type='股票',
        plot_charts=True,
        backtest_only=False,
        benchmark='000300.HIX'
    )
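
回测跑通之后,也可以把预测结果单独读出来检查。下面是一个最小示意(假设 m8.predictions 与回测中 context.options['data'] 一样支持 read_df(),且同一交易日内的行已按预测分值从高到低排列,列名以实际输出为准):

    In [ ]:
    # 示意:读取 StockRanker 的预测结果,查看最近一个交易日排名靠前的候选股票
    pred_df = m8.predictions.read_df()                      # 与回测内 read_df() 读到的是同一份预测数据
    last_day = pred_df[pred_df['date'] == pred_df['date'].max()]
    last_day.head(10)                                       # 若每日已按预测排序输出,前 10 行即排名最靠前的候选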