Dataset schema (column · dtype · observed value/length range):

- nwo — string, length 10 to 28
- sha — string, length 40
- path — string, length 11 to 97
- identifier — string, length 1 to 64
- parameters — string, length 2 to 2.24k
- return_statement — string, length 0 to 2.17k
- docstring — string, length 0 to 5.45k
- docstring_summary — string, length 0 to 3.83k
- func_begin — int64, 1 to 13.4k
- func_end — int64, 2 to 13.4k
- function — string, length 28 to 56.4k
- url — string, length 106 to 209
- project — int64, 1 to 48
- executed_lines — list
- executed_lines_pc — float64, 0 to 153
- missing_lines — list
- missing_lines_pc — float64, 0 to 100
- covered — bool (2 classes)
- filecoverage — float64, 2.53 to 100
- function_lines — int64, 2 to 1.46k
- mccabe — int64, 1 to 253
- coverage — float64, 0 to 100
- docstring_lines — int64, 0 to 112
- function_nodoc — string, length 9 to 56.4k
- id — int64, 0 to 29.8k

Each record below lists these 25 fields in this order, separated by `|`.
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_index_price_nh.py
|
futures_index_symbol_table_nh
|
()
|
return temp_df
|
南华期货-南华指数所有品种一览表
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:return: 南华指数所有品种一览表
:rtype: pandas.DataFrame
|
南华期货-南华指数所有品种一览表
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:return: 南华指数所有品种一览表
:rtype: pandas.DataFrame
| 16 | 28 |
def futures_index_symbol_table_nh() -> pd.DataFrame:
"""
南华期货-南华指数所有品种一览表
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:return: 南华指数所有品种一览表
:rtype: pandas.DataFrame
"""
url = "http://www.nanhua.net/ianalysis/plate-variety.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_index_price_nh.py#L16-L28
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 53.846154 |
[
7,
8,
9,
10,
11,
12
] | 46.153846 | false | 25 | 13 | 1 | 53.846154 | 4 |
def futures_index_symbol_table_nh() -> pd.DataFrame:
url = "http://www.nanhua.net/ianalysis/plate-variety.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date
return temp_df
| 18,235 |
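A minimal usage sketch for the record above (Nanhua Futures index symbol table, 南华指数所有品种一览表). It assumes the akshare build at this commit is installed, that the function is exported at package level as the docstring implies (ak.futures_index_symbol_table_nh), and that the nanhua.net endpoint is still reachable:

import akshare as ak

# Fetch the full symbol table; the columns include at least 'code' and 'firstday'.
symbol_table_df = ak.futures_index_symbol_table_nh()
print(symbol_table_df.head())
# The 'code' values (e.g. "A") are the symbols accepted by futures_price_index_nh
# and futures_return_index_nh in the records below.
print(symbol_table_df["code"].tolist()[:10])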
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_index_price_nh.py
|
futures_price_index_nh
|
(symbol: str = "A")
|
南华期货-南华指数单品种-价格-所有历史数据
https://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华期货-南华指数单品种-价格-所有历史数据
:rtype: pandas.Series
|
南华期货-南华指数单品种-价格-所有历史数据
https://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华期货-南华指数单品种-价格-所有历史数据
:rtype: pandas.Series
| 31 | 50 |
def futures_price_index_nh(symbol: str = "A") -> pd.DataFrame:
"""
南华期货-南华指数单品种-价格-所有历史数据
https://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华期货-南华指数单品种-价格-所有历史数据
:rtype: pandas.Series
"""
symbol_df = futures_index_symbol_table_nh()
symbol_list = symbol_df["code"].tolist()
if symbol in symbol_list:
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = (pd.to_datetime(temp_df["date"], unit='ms') - pd.Timedelta(hours=-8)).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_index_price_nh.py#L31-L50
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 45 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19
] | 55 | false | 25 | 20 | 2 | 45 | 6 |
def futures_price_index_nh(symbol: str = "A") -> pd.DataFrame:
symbol_df = futures_index_symbol_table_nh()
symbol_list = symbol_df["code"].tolist()
if symbol in symbol_list:
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = (pd.to_datetime(temp_df["date"], unit='ms') - pd.Timedelta(hours=-8)).dt.date
return temp_df
| 18,236 |
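A hedged usage sketch for futures_price_index_nh (per-symbol Nanhua index price history). Two quirks are visible in the source above: the docstring advertises ":rtype: pandas.Series" while the annotation and actual return value are a pandas.DataFrame, and an unknown symbol falls through the if block so the call returns None. Assuming the package-level export ak.futures_price_index_nh:

import akshare as ak

# Price history for one Nanhua index symbol; returns a DataFrame with 'date' and
# 'value' columns despite the docstring's ":rtype: pandas.Series".
price_df = ak.futures_price_index_nh(symbol="A")
if price_df is None:
    # The function falls through and returns None when the symbol is not in the table.
    print("unknown symbol")
else:
    print(price_df.tail())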
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_hog.py
|
futures_hog_info
|
(symbol: str = "猪肉批发价") -> pd.DataFrame
|
养猪数据中心
https://zhujia.zhuwang.cc/
:param symbol: choice of {"猪肉批发价", "仔猪价格", "生猪期货指数", "二元母猪价格", "生猪产能数据", "饲料原料数据", "中央储备冻猪肉", "白条肉", "育肥猪配合饲料", "肉类价格指数", "猪粮比价", "猪企销售简报-销售量", "猪企销售简报-销售额", "猪企销售简报-销售均价"}
:type symbol: str
:return: 猪肉信息
:rtype: pandas.DataFrame
|
养猪数据中心
https://zhujia.zhuwang.cc/
:param symbol: choice of {"猪肉批发价", "仔猪价格", "生猪期货指数", "二元母猪价格", "生猪产能数据", "饲料原料数据", "中央储备冻猪肉", "白条肉", "育肥猪配合饲料", "肉类价格指数", "猪粮比价", "猪企销售简报-销售量", "猪企销售简报-销售额", "猪企销售简报-销售均价"}
:type symbol: str
:return: 猪肉信息
:rtype: pandas.DataFrame
| 12 | 173 |
def futures_hog_info(symbol: str = "猪肉批发价") -> pd.DataFrame:
"""
养猪数据中心
https://zhujia.zhuwang.cc/
:param symbol: choice of {"猪肉批发价", "仔猪价格", "生猪期货指数", "二元母猪价格", "生猪产能数据", "饲料原料数据", "中央储备冻猪肉", "白条肉", "育肥猪配合饲料", "肉类价格指数", "猪粮比价", "猪企销售简报-销售量", "猪企销售简报-销售额", "猪企销售简报-销售均价"}
:type symbol: str
:return: 猪肉信息
:rtype: pandas.DataFrame
"""
if symbol == "猪肉批发价":
url = "https://zhujia.zhuwang.cc/new_map/zhujiapork/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "item", "value"]
del temp_df["item"]
return temp_df
elif symbol == "仔猪价格":
url = "https://zhujia.zhuwang.cc/new_map/zhizhu/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "生猪期货指数":
url = "https://zhujia.zhuwang.cc/new_map/shengzhuqihuo/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
need_list = temp_df.iloc[-1, [1, 3, 5, 7, 9, 11, 13, 15]].tolist()
temp_df.columns = list("abcdefghijklmnopq")
temp_df = temp_df.drop(["b", "d", "f", "h", "j", "l", "n", "p"], axis="columns")
temp_df.columns = ["日期"] + need_list
return temp_df
elif symbol == "二元母猪价格":
url = "https://zhujia.zhuwang.cc/new_map/eryuanpig/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "生猪产能数据":
url = "https://zhujia.zhuwang.cc/new_map/shengzhuchanneng/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "能繁母猪存栏", "猪肉产量", "生猪存栏", "生猪出栏"]
temp_df["能繁母猪存栏"] = pd.to_numeric(temp_df["能繁母猪存栏"], errors="coerce")
temp_df["猪肉产量"] = pd.to_numeric(temp_df["猪肉产量"], errors="coerce")
temp_df["生猪存栏"] = pd.to_numeric(temp_df["生猪存栏"], errors="coerce")
temp_df["生猪出栏"] = pd.to_numeric(temp_df["生猪出栏"], errors="coerce")
return temp_df
elif symbol == "饲料原料数据":
url = "https://zhujia.zhuwang.cc/new_map/pigfeed/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "大豆进口金额", "大豆播种面积", "玉米进口金额", "玉米播种面积"]
temp_df["周期"] = temp_df["周期"].astype(int).astype(str)
temp_df["大豆进口金额"] = pd.to_numeric(temp_df["大豆进口金额"], errors="coerce")
temp_df["大豆播种面积"] = pd.to_numeric(temp_df["大豆播种面积"], errors="coerce")
temp_df["玉米进口金额"] = pd.to_numeric(temp_df["玉米进口金额"], errors="coerce")
temp_df["玉米播种面积"] = pd.to_numeric(temp_df["玉米播种面积"], errors="coerce")
return temp_df
elif symbol == "中央储备冻猪肉":
url = "https://zhujia.zhuwang.cc/new_map/chubeidongzhurou/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "白条肉":
url = "https://zhujia.zhuwang.cc/new_map/baitiaozhurou/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "白条肉平均出厂价格", "环比", "同比"]
temp_df["白条肉平均出厂价格"] = pd.to_numeric(temp_df["白条肉平均出厂价格"])
temp_df["环比"] = pd.to_numeric(temp_df["环比"])
temp_df["同比"] = pd.to_numeric(temp_df["同比"])
return temp_df
elif symbol == "育肥猪配合饲料":
url = "https://zhujia.zhuwang.cc/new_map/yufeipig/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["周期", "发布日期", "_", "本周", "去年同期", "上一周", "_", "_", "_"]
temp_df = temp_df[["发布日期", "周期", "本周", "去年同期", "上一周"]]
temp_df["去年同期"] = pd.to_numeric(temp_df["去年同期"])
temp_df["上一周"] = pd.to_numeric(temp_df["上一周"])
return temp_df
elif symbol == "肉类价格指数":
url = "https://zhujia.zhuwang.cc/new_map/meatindex/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "_", "value"]
temp_df = temp_df[["date", "value"]]
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "猪粮比价":
url = "https://zhujia.zhuwang.cc/new_map/zhuliangbi/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "猪企销售简报-销售量":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/xiaoliang.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
elif symbol == "猪企销售简报-销售额":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/xiaoshoue.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
elif symbol == "猪企销售简报-销售均价":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/junjia.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_hog.py#L12-L173
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 5.555556 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150,
151,
152,
153,
154,
155,
156,
157,
158,
159,
160,
161
] | 94.444444 | false | 2.531646 | 162 | 15 | 5.555556 | 6 |
def futures_hog_info(symbol: str = "猪肉批发价") -> pd.DataFrame:
if symbol == "猪肉批发价":
url = "https://zhujia.zhuwang.cc/new_map/zhujiapork/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "item", "value"]
del temp_df["item"]
return temp_df
elif symbol == "仔猪价格":
url = "https://zhujia.zhuwang.cc/new_map/zhizhu/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "生猪期货指数":
url = "https://zhujia.zhuwang.cc/new_map/shengzhuqihuo/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
need_list = temp_df.iloc[-1, [1, 3, 5, 7, 9, 11, 13, 15]].tolist()
temp_df.columns = list("abcdefghijklmnopq")
temp_df = temp_df.drop(["b", "d", "f", "h", "j", "l", "n", "p"], axis="columns")
temp_df.columns = ["日期"] + need_list
return temp_df
elif symbol == "二元母猪价格":
url = "https://zhujia.zhuwang.cc/new_map/eryuanpig/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "生猪产能数据":
url = "https://zhujia.zhuwang.cc/new_map/shengzhuchanneng/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "能繁母猪存栏", "猪肉产量", "生猪存栏", "生猪出栏"]
temp_df["能繁母猪存栏"] = pd.to_numeric(temp_df["能繁母猪存栏"], errors="coerce")
temp_df["猪肉产量"] = pd.to_numeric(temp_df["猪肉产量"], errors="coerce")
temp_df["生猪存栏"] = pd.to_numeric(temp_df["生猪存栏"], errors="coerce")
temp_df["生猪出栏"] = pd.to_numeric(temp_df["生猪出栏"], errors="coerce")
return temp_df
elif symbol == "饲料原料数据":
url = "https://zhujia.zhuwang.cc/new_map/pigfeed/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "大豆进口金额", "大豆播种面积", "玉米进口金额", "玉米播种面积"]
temp_df["周期"] = temp_df["周期"].astype(int).astype(str)
temp_df["大豆进口金额"] = pd.to_numeric(temp_df["大豆进口金额"], errors="coerce")
temp_df["大豆播种面积"] = pd.to_numeric(temp_df["大豆播种面积"], errors="coerce")
temp_df["玉米进口金额"] = pd.to_numeric(temp_df["玉米进口金额"], errors="coerce")
temp_df["玉米播种面积"] = pd.to_numeric(temp_df["玉米播种面积"], errors="coerce")
return temp_df
elif symbol == "中央储备冻猪肉":
url = "https://zhujia.zhuwang.cc/new_map/chubeidongzhurou/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "白条肉":
url = "https://zhujia.zhuwang.cc/new_map/baitiaozhurou/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "白条肉平均出厂价格", "环比", "同比"]
temp_df["白条肉平均出厂价格"] = pd.to_numeric(temp_df["白条肉平均出厂价格"])
temp_df["环比"] = pd.to_numeric(temp_df["环比"])
temp_df["同比"] = pd.to_numeric(temp_df["同比"])
return temp_df
elif symbol == "育肥猪配合饲料":
url = "https://zhujia.zhuwang.cc/new_map/yufeipig/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["周期", "发布日期", "_", "本周", "去年同期", "上一周", "_", "_", "_"]
temp_df = temp_df[["发布日期", "周期", "本周", "去年同期", "上一周"]]
temp_df["去年同期"] = pd.to_numeric(temp_df["去年同期"])
temp_df["上一周"] = pd.to_numeric(temp_df["上一周"])
return temp_df
elif symbol == "肉类价格指数":
url = "https://zhujia.zhuwang.cc/new_map/meatindex/chart1.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "_", "value"]
temp_df = temp_df[["date", "value"]]
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "猪粮比价":
url = "https://zhujia.zhuwang.cc/new_map/zhuliangbi/chart2.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"], format="%Y年%m月%d日").dt.date
temp_df["value"] = pd.to_numeric(temp_df["value"])
return temp_df
elif symbol == "猪企销售简报-销售量":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/xiaoliang.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
elif symbol == "猪企销售简报-销售额":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/xiaoshoue.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
elif symbol == "猪企销售简报-销售均价":
url = "https://zhujia.zhuwang.cc/new_map/zhuqixiaoshoujianbao/junjia.json"
params = {"timestamp": "1627567846422"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json).T
temp_df.columns = ["周期", "温氏", "正邦", "新希望", "牧原"]
temp_df["温氏"] = pd.to_numeric(temp_df["温氏"])
temp_df["正邦"] = pd.to_numeric(temp_df["正邦"])
temp_df["新希望"] = pd.to_numeric(temp_df["新希望"])
temp_df["牧原"] = pd.to_numeric(temp_df["牧原"])
return temp_df
| 18,237 |
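All fourteen branches of futures_hog_info (hog-farming data center, 养猪数据中心) follow the same fetch, reshape and convert pattern against zhujia.zhuwang.cc. A hedged sketch, assuming the package-level export ak.futures_hog_info and a reachable endpoint; a symbol outside the documented set falls through every elif and yields None:

import akshare as ak

# Each supported symbol maps to one zhujia.zhuwang.cc JSON endpoint.
for sym in ["猪肉批发价", "仔猪价格", "猪粮比价"]:
    df = ak.futures_hog_info(symbol=sym)
    print(sym, None if df is None else df.shape)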
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_hog.py
|
futures_hog_rank
|
(symbol: str = "外三元") -> pd.DataFrame
|
价格排行榜
https://zhujia.zhuwang.cc/lists.shtml
:param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
:type symbol: str
:return: 价格排行榜
:rtype: pandas.DataFrame
|
价格排行榜
https://zhujia.zhuwang.cc/lists.shtml
:param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
:type symbol: str
:return: 价格排行榜
:rtype: pandas.DataFrame
| 176 | 254 |
def futures_hog_rank(symbol: str = "外三元") -> pd.DataFrame:
"""
价格排行榜
https://zhujia.zhuwang.cc/lists.shtml
:param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
:type symbol: str
:return: 价格排行榜
:rtype: pandas.DataFrame
"""
if symbol == "外三元":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "内三元":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-1.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "土杂猪":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-2.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "玉米":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-3.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "豆粕":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-4.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_hog.py#L176-L254
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 11.392405 |
[
9,
10,
11,
18,
19,
20,
21,
22,
23,
24,
25,
32,
33,
34,
35,
36,
37,
38,
39,
46,
47,
48,
49,
50,
51,
52,
53,
60,
61,
62,
63,
64,
65,
66,
67,
74,
75,
76,
77,
78
] | 50.632911 | false | 2.531646 | 79 | 6 | 49.367089 | 6 |
def futures_hog_rank(symbol: str = "外三元") -> pd.DataFrame:
if symbol == "外三元":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "内三元":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-1.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "土杂猪":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-2.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "玉米":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-3.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
elif symbol == "豆粕":
temp_df = pd.read_html("https://zhujia.zhuwang.cc/lists-4.shtml")[0]
temp_df.columns = [
'排名',
'品种',
'省份',
'价格-公斤',
'价格-斤',
]
temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
return temp_df
| 18,238 |
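The five branches of futures_hog_rank (price ranking, 价格排行榜) differ only in the page suffix (lists.shtml, lists-1.shtml through lists-4.shtml). A hedged sketch, assuming ak.futures_hog_rank is exported and an HTML parser such as lxml is installed for pd.read_html:

import akshare as ak

# Ranking table with 排名/品种/省份 and per-kg / per-jin prices already cast to numbers.
rank_df = ak.futures_hog_rank(symbol="外三元")
print(rank_df.sort_values("价格-公斤", ascending=False).head())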
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_index_return_nh.py
|
futures_return_index_nh
|
(symbol: str = "Y")
|
南华期货-南华指数单品种-收益率-所有历史数据
http://www.nanhua.net/ianalysis/varietyindex/index/NHCI.json?t=1574932290494
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华指数单品种-收益率-所有历史数据
:rtype: pandas.Series
|
南华期货-南华指数单品种-收益率-所有历史数据
http://www.nanhua.net/ianalysis/varietyindex/index/NHCI.json?t=1574932290494
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华指数单品种-收益率-所有历史数据
:rtype: pandas.Series
| 17 | 37 |
def futures_return_index_nh(symbol: str = "Y") -> pd.DataFrame:
"""
南华期货-南华指数单品种-收益率-所有历史数据
http://www.nanhua.net/ianalysis/varietyindex/index/NHCI.json?t=1574932290494
:param symbol: 通过 ak.futures_index_symbol_table_nh() 获取
:type symbol: str
:return: 南华指数单品种-收益率-所有历史数据
:rtype: pandas.Series
"""
symbol_df = futures_index_symbol_table_nh()
symbol_list = symbol_df["code"].tolist()
if symbol in symbol_list:
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/index/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms').dt.date
temp_df['value'] = pd.to_numeric(temp_df['value'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_index_return_nh.py#L17-L37
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 42.857143 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20
] | 57.142857 | false | 33.333333 | 21 | 2 | 42.857143 | 6 |
def futures_return_index_nh(symbol: str = "Y") -> pd.DataFrame:
symbol_df = futures_index_symbol_table_nh()
symbol_list = symbol_df["code"].tolist()
if symbol in symbol_list:
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/index/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms').dt.date
temp_df['value'] = pd.to_numeric(temp_df['value'])
return temp_df
| 18,239 |
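futures_return_index_nh mirrors futures_price_index_nh but reads the /ianalysis/varietyindex/index/ endpoint (cumulative return index), and shares the same quirks: a DataFrame return despite the ":rtype: pandas.Series" docstring, and None for symbols missing from the symbol table. A hedged sketch assuming the ak.futures_return_index_nh export:

import akshare as ak

# Return-index history for one symbol; None if the symbol is not in
# futures_index_symbol_table_nh().
return_df = ak.futures_return_index_nh(symbol="Y")
print(return_df.head())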
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_other_index_nh.py
|
futures_board_index_nh
|
(start_date: str = "20220104", end_date: str = "20220413")
|
return temp_df
|
南华期货-市场涨跌-板块指数涨跌
http://www.nanhua.net/nhzc/platechange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 板块指数涨跌
:rtype: pandas.DataFrame
|
南华期货-市场涨跌-板块指数涨跌
http://www.nanhua.net/nhzc/platechange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 板块指数涨跌
:rtype: pandas.DataFrame
| 17 | 59 |
def futures_board_index_nh(start_date: str = "20220104", end_date: str = "20220413") -> pd.DataFrame:
"""
南华期货-市场涨跌-板块指数涨跌
http://www.nanhua.net/nhzc/platechange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 板块指数涨跌
:rtype: pandas.DataFrame
"""
url = f"http://www.nanhua.net/ianalysis/plate/{start_date[:4]}/{start_date[4:6]}/{start_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
start_df = pd.DataFrame(r.json())
start_df.columns = [
'name',
'code',
start_date,
]
url = f"http://www.nanhua.net/ianalysis/plate/{end_date[:4]}/{end_date[4:6]}/{end_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
end_df = pd.DataFrame(r.json())
end_df.columns = [
'name',
'code',
'end_date',
]
start_df[end_date] = end_df['end_date']
start_df['gap'] = start_df[end_date] - start_df[start_date]
start_df['return'] = start_df['gap']/start_df[start_date]
temp_df = start_df
temp_df = temp_df[['name', 'return']]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_other_index_nh.py#L17-L59
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 25.581395 |
[
11,
12,
15,
16,
17,
23,
24,
27,
28,
29,
35,
36,
37,
39,
40,
42
] | 37.209302 | false | 13.461538 | 43 | 1 | 62.790698 | 8 |
def futures_board_index_nh(start_date: str = "20220104", end_date: str = "20220413") -> pd.DataFrame:
url = f"http://www.nanhua.net/ianalysis/plate/{start_date[:4]}/{start_date[4:6]}/{start_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
start_df = pd.DataFrame(r.json())
start_df.columns = [
'name',
'code',
start_date,
]
url = f"http://www.nanhua.net/ianalysis/plate/{end_date[:4]}/{end_date[4:6]}/{end_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
end_df = pd.DataFrame(r.json())
end_df.columns = [
'name',
'code',
'end_date',
]
start_df[end_date] = end_df['end_date']
start_df['gap'] = start_df[end_date] - start_df[start_date]
start_df['return'] = start_df['gap']/start_df[start_date]
temp_df = start_df
temp_df = temp_df[['name', 'return']]
return temp_df
| 18,240 |
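futures_board_index_nh fetches two dated board-index snapshots and reports the relative change per board; futures_variety_index_nh in the next record is identical except it reads /ianalysis/variety/ instead of /ianalysis/plate/. A hedged sketch, assuming ak.futures_board_index_nh is exported and that both dates exist as /ianalysis/plate/YYYY/MM/YYYYMMDD.json snapshots on nanhua.net (otherwise the JSON request fails):

import akshare as ak

# 'return' is (end - start) / start per board over the chosen window.
board_df = ak.futures_board_index_nh(start_date="20220104", end_date="20220413")
print(board_df.sort_values("return", ascending=False))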
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_other_index_nh.py
|
futures_variety_index_nh
|
(start_date: str = "20220104", end_date: str = "20220413")
|
return temp_df
|
南华期货-市场涨跌-品种指数涨跌
http://www.nanhua.net/nhzc/varietychange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 品种指数涨跌
:rtype: pandas.DataFrame
|
南华期货-市场涨跌-品种指数涨跌
http://www.nanhua.net/nhzc/varietychange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 品种指数涨跌
:rtype: pandas.DataFrame
| 62 | 103 |
def futures_variety_index_nh(start_date: str = "20220104", end_date: str = "20220413") -> pd.DataFrame:
"""
南华期货-市场涨跌-品种指数涨跌
http://www.nanhua.net/nhzc/varietychange.html
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 品种指数涨跌
:rtype: pandas.DataFrame
"""
url = f"http://www.nanhua.net/ianalysis/variety/{start_date[:4]}/{start_date[4:6]}/{start_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
start_df = pd.DataFrame(r.json())
start_df.columns = [
'name',
'code',
start_date,
]
url = f"http://www.nanhua.net/ianalysis/variety/{end_date[:4]}/{end_date[4:6]}/{end_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
end_df = pd.DataFrame(r.json())
end_df.columns = [
'name',
'code',
'end_date',
]
start_df[end_date] = end_df['end_date']
start_df['gap'] = start_df[end_date] - start_df[start_date]
start_df['return'] = start_df['gap']/start_df[start_date]
temp_df = start_df
temp_df = temp_df[['name', 'return']]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_other_index_nh.py#L62-L103
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 26.190476 |
[
11,
12,
15,
16,
17,
23,
24,
27,
28,
29,
34,
36,
37,
39,
40,
41
] | 38.095238 | false | 13.461538 | 42 | 1 | 61.904762 | 8 |
def futures_variety_index_nh(start_date: str = "20220104", end_date: str = "20220413") -> pd.DataFrame:
url = f"http://www.nanhua.net/ianalysis/variety/{start_date[:4]}/{start_date[4:6]}/{start_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
start_df = pd.DataFrame(r.json())
start_df.columns = [
'name',
'code',
start_date,
]
url = f"http://www.nanhua.net/ianalysis/variety/{end_date[:4]}/{end_date[4:6]}/{end_date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
end_df = pd.DataFrame(r.json())
end_df.columns = [
'name',
'code',
'end_date',
]
start_df[end_date] = end_df['end_date']
start_df['gap'] = start_df[end_date] - start_df[start_date]
start_df['return'] = start_df['gap']/start_df[start_date]
temp_df = start_df
temp_df = temp_df[['name', 'return']]
return temp_df
| 18,241 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/futures_derivative/futures_other_index_nh.py
|
futures_correlation_nh
|
(date: str = "20220104", period: str = "20")
|
return temp_df
|
南华期货-统计监控-相关系数矩阵
http://www.nanhua.net/nhzc/correltable.html
:param date: 开始时间
:type date: str
:param period: 周期; choice of {"5", "20", "60", "120"}
:type period: str
:return: 相关系数矩阵
:rtype: pandas.DataFrame
|
南华期货-统计监控-相关系数矩阵
http://www.nanhua.net/nhzc/correltable.html
:param date: 开始时间
:type date: str
:param period: 周期; choice of {"5", "20", "60", "120"}
:type period: str
:return: 相关系数矩阵
:rtype: pandas.DataFrame
| 106 | 131 |
def futures_correlation_nh(date: str = "20220104", period: str = "20") -> pd.DataFrame:
"""
南华期货-统计监控-相关系数矩阵
http://www.nanhua.net/nhzc/correltable.html
:param date: 开始时间
:type date: str
:param period: 周期; choice of {"5", "20", "60", "120"}
:type period: str
:return: 相关系数矩阵
:rtype: pandas.DataFrame
"""
url = f"http://www.nanhua.net/ianalysis/correl/{period}/{date[:4]}/{date[4:6]}/{date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(r.json())
temp_df.columns = [
'品种代码1',
'品种名称1',
'品种代码2',
'品种名称2',
'相关系数',
]
temp_df['相关系数'] = pd.to_numeric(temp_df['相关系数'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/futures_derivative/futures_other_index_nh.py#L106-L131
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 42.307692 |
[
11,
12,
15,
16,
17,
24,
25
] | 26.923077 | false | 13.461538 | 26 | 1 | 73.076923 | 8 |
def futures_correlation_nh(date: str = "20220104", period: str = "20") -> pd.DataFrame:
url = f"http://www.nanhua.net/ianalysis/correl/{period}/{date[:4]}/{date[4:6]}/{date}.json"
params = {
't': '1649920913503'
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(r.json())
temp_df.columns = [
'品种代码1',
'品种名称1',
'品种代码2',
'品种名称2',
'相关系数',
]
temp_df['相关系数'] = pd.to_numeric(temp_df['相关系数'])
return temp_df
| 18,242 |
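futures_correlation_nh returns the correlation matrix in long form, one row per variety pair. A hedged sketch that pivots it into a square matrix, assuming ak.futures_correlation_nh is exported and that each (品种名称1, 品种名称2) pair occurs exactly once for the chosen date and period:

import akshare as ak

corr_long_df = ak.futures_correlation_nh(date="20220104", period="20")
# Pivot the long-form pairs into a square correlation matrix (assumes unique pairs).
corr_matrix = corr_long_df.pivot(index="品种名称1", columns="品种名称2", values="相关系数")
print(corr_matrix.iloc[:5, :5])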
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_spot.py
|
spot_goods
|
(symbol: str = "波罗的海干散货指数") -> pd.DataFrame:
|
return temp_df
|
新浪财经-商品现货价格指数
http://finance.sina.com.cn/futuremarket/spotprice.shtml#titlePos_0
:param symbol: choice of {"进口大豆价格指数", "波罗的海干散货指数", "钢坯价格指数", "普氏62%铁矿石指数"}
:type symbol: str
:return: 商品现货价格指数
:rtype: pandas.DataFrame
|
新浪财经-商品现货价格指数
http://finance.sina.com.cn/futuremarket/spotprice.shtml#titlePos_0
:param symbol: choice of {"进口大豆价格指数", "波罗的海干散货指数", "钢坯价格指数", "普氏62%铁矿石指数"}
:type symbol: str
:return: 商品现货价格指数
:rtype: pandas.DataFrame
| 12 | 39 |
def spot_goods(symbol: str = "波罗的海干散货指数") -> pd.DataFrame:
"""
新浪财经-商品现货价格指数
http://finance.sina.com.cn/futuremarket/spotprice.shtml#titlePos_0
:param symbol: choice of {"进口大豆价格指数", "波罗的海干散货指数", "钢坯价格指数", "普氏62%铁矿石指数"}
:type symbol: str
:return: 商品现货价格指数
:rtype: pandas.DataFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/GoodsIndexService.get_goods_index"
symbol_url_dict = {
"进口大豆价格指数": "SOY",
"波罗的海干散货指数": "BDI",
"钢坯价格指数": "GP",
"普氏62%铁矿石指数": "PS",
}
params = {"symbol": symbol_url_dict[symbol], "table": "0"}
r = requests.get(url, params=params)
r.encoding = "gbk"
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"]["data"])
temp_df = temp_df[["opendate", "price", "zde", "zdf"]]
temp_df.columns = ["日期", "指数", "涨跌额", "涨跌幅"]
temp_df['日期'] = pd.to_datetime(temp_df['日期']).dt.date
temp_df['指数'] = pd.to_numeric(temp_df['指数'])
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'])
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_spot.py#L12-L39
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 32.142857 |
[
9,
10,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27
] | 50 | false | 23.809524 | 28 | 1 | 50 | 6 |
def spot_goods(symbol: str = "波罗的海干散货指数") -> pd.DataFrame:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/GoodsIndexService.get_goods_index"
symbol_url_dict = {
"进口大豆价格指数": "SOY",
"波罗的海干散货指数": "BDI",
"钢坯价格指数": "GP",
"普氏62%铁矿石指数": "PS",
}
params = {"symbol": symbol_url_dict[symbol], "table": "0"}
r = requests.get(url, params=params)
r.encoding = "gbk"
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"]["data"])
temp_df = temp_df[["opendate", "price", "zde", "zdf"]]
temp_df.columns = ["日期", "指数", "涨跌额", "涨跌幅"]
temp_df['日期'] = pd.to_datetime(temp_df['日期']).dt.date
temp_df['指数'] = pd.to_numeric(temp_df['指数'])
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'])
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
return temp_df
| 18,243 |
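spot_goods maps the four Chinese index names to Sina's SOY/BDI/GP/PS codes, so any other symbol raises a KeyError before a request is made. A hedged sketch assuming the ak.spot_goods export and the Sina endpoint remaining available:

import akshare as ak

# Baltic Dry Index (波罗的海干散货指数); columns are 日期, 指数, 涨跌额, 涨跌幅.
bdi_df = ak.spot_goods(symbol="波罗的海干散货指数")
print(bdi_df.head())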
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_eri.py
|
index_eri
|
()
|
return big_df
|
浙江省排污权交易指数
https://zs.zjpwq.net
:return: 浙江省排污权交易指数
:rtype: pandas.DataFrame
|
浙江省排污权交易指数
https://zs.zjpwq.net
:return: 浙江省排污权交易指数
:rtype: pandas.DataFrame
| 12 | 72 |
def index_eri() -> pd.DataFrame:
"""
浙江省排污权交易指数
https://zs.zjpwq.net
:return: 浙江省排污权交易指数
:rtype: pandas.DataFrame
"""
url = "https://zs.zjpwq.net/zhe-jiang-pwq-webapi/indexData"
params = {
"indexId": "1",
"areaCode": "330000",
"cycle": "MONTH",
"structCode": "01",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["id"]
del temp_df["indexId"]
del temp_df["stageId"]
del temp_df["structCode"]
del temp_df["areaCode"]
del temp_df["rawValue"]
temp_df.columns = [
"value",
"date",
]
temp_df = temp_df[
[
"date",
"value",
]
]
big_df = temp_df
url = "https://zs.zjpwq.net/zhe-jiang-pwq-webapi/rawValueStatistics"
params = {
"orderBy": "-date",
"pageSize": "1000",
"quotaType": "0",
"index": "TOTAL_QUANTITY",
"areaCode": "330000",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["id"]
del temp_df["quotaType"]
del temp_df["index"]
temp_df.columns = [
"date",
"value",
"update",
]
big_df = big_df.merge(temp_df, on="date")
big_df.columns = [
"日期",
"交易指数",
"成交量",
"更新时间",
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_eri.py#L12-L72
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 11.47541 |
[
7,
8,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
27,
33,
34,
35,
42,
43,
44,
45,
46,
47,
48,
53,
54,
60
] | 42.622951 | false | 15.151515 | 61 | 1 | 57.377049 | 4 |
def index_eri() -> pd.DataFrame:
url = "https://zs.zjpwq.net/zhe-jiang-pwq-webapi/indexData"
params = {
"indexId": "1",
"areaCode": "330000",
"cycle": "MONTH",
"structCode": "01",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["id"]
del temp_df["indexId"]
del temp_df["stageId"]
del temp_df["structCode"]
del temp_df["areaCode"]
del temp_df["rawValue"]
temp_df.columns = [
"value",
"date",
]
temp_df = temp_df[
[
"date",
"value",
]
]
big_df = temp_df
url = "https://zs.zjpwq.net/zhe-jiang-pwq-webapi/rawValueStatistics"
params = {
"orderBy": "-date",
"pageSize": "1000",
"quotaType": "0",
"index": "TOTAL_QUANTITY",
"areaCode": "330000",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["id"]
del temp_df["quotaType"]
del temp_df["index"]
temp_df.columns = [
"date",
"value",
"update",
]
big_df = big_df.merge(temp_df, on="date")
big_df.columns = [
"日期",
"交易指数",
"成交量",
"更新时间",
]
return big_df
| 18,244 |
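index_eri (Zhejiang emission-rights trading index, 浙江省排污权交易指数) takes no arguments; it merges the monthly index series with traded volume on the date column. A hedged sketch assuming the ak.index_eri export and reachable zs.zjpwq.net endpoints:

import akshare as ak

# Merged result with columns 日期, 交易指数, 成交量, 更新时间.
eri_df = ak.index_eri()
print(eri_df.tail())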
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_kq_fz.py
|
index_kq_fz
|
(symbol: str = "价格指数") -> pd.DataFrame
|
return big_df
|
中国柯桥纺织指数
http://www.kqindex.cn/flzs/jiage
:param symbol: choice of {'价格指数', '景气指数', '外贸指数'}
:type symbol: str
:return: 中国柯桥纺织指数
:rtype: pandas.DataFrame
|
中国柯桥纺织指数
http://www.kqindex.cn/flzs/jiage
:param symbol: choice of {'价格指数', '景气指数', '外贸指数'}
:type symbol: str
:return: 中国柯桥纺织指数
:rtype: pandas.DataFrame
| 14 | 76 |
def index_kq_fz(symbol: str = "价格指数") -> pd.DataFrame:
"""
中国柯桥纺织指数
http://www.kqindex.cn/flzs/jiage
:param symbol: choice of {'价格指数', '景气指数', '外贸指数'}
:type symbol: str
:return: 中国柯桥纺织指数
:rtype: pandas.DataFrame
"""
symbol_map = {
"价格指数": "1_1",
"景气指数": "1_2",
"外贸指数": "2",
}
url = "http://www.kqindex.cn/flzs/table_data"
params = {
"category": "0",
"start": "",
"end": "",
"indexType": f"{symbol_map[symbol]}",
"pageindex": "1",
"_": "1619871781413",
}
r = session.get(url, params=params)
data_json = r.json()
page_num = data_json["page"]
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"category": "0",
"start": "",
"end": "",
"indexType": f"{symbol_map[symbol]}",
"pageindex": page,
"_": "1619871781413",
}
r = session.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
big_df = big_df.append(temp_df, ignore_index=True)
if symbol == "价格指数":
big_df.columns = [
"期次",
"指数",
"涨跌幅",
]
elif symbol == "景气指数":
big_df.columns = [
"期次",
"总景气指数",
"涨跌幅",
"流通景气指数",
"生产景气指数",
]
elif symbol == "外贸指数":
big_df.columns = [
"期次",
"价格指数",
"涨跌幅",
"景气指数",
"涨跌幅",
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_kq_fz.py#L14-L76
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 14.285714 |
[
9,
14,
15,
23,
24,
25,
26,
27,
28,
36,
37,
38,
39,
40,
41,
46,
47,
54,
55,
62
] | 31.746032 | false | 18.75 | 63 | 5 | 68.253968 | 6 |
def index_kq_fz(symbol: str = "价格指数") -> pd.DataFrame:
symbol_map = {
"价格指数": "1_1",
"景气指数": "1_2",
"外贸指数": "2",
}
url = "http://www.kqindex.cn/flzs/table_data"
params = {
"category": "0",
"start": "",
"end": "",
"indexType": f"{symbol_map[symbol]}",
"pageindex": "1",
"_": "1619871781413",
}
r = session.get(url, params=params)
data_json = r.json()
page_num = data_json["page"]
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"category": "0",
"start": "",
"end": "",
"indexType": f"{symbol_map[symbol]}",
"pageindex": page,
"_": "1619871781413",
}
r = session.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
big_df = big_df.append(temp_df, ignore_index=True)
if symbol == "价格指数":
big_df.columns = [
"期次",
"指数",
"涨跌幅",
]
elif symbol == "景气指数":
big_df.columns = [
"期次",
"总景气指数",
"涨跌幅",
"流通景气指数",
"生产景气指数",
]
elif symbol == "外贸指数":
big_df.columns = [
"期次",
"价格指数",
"涨跌幅",
"景气指数",
"涨跌幅",
]
return big_df
| 18,245 |
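index_kq_fz (China Keqiao textile index, 中国柯桥纺织指数) pages through table_data using a module-level session and tqdm, and accumulates pages with DataFrame.append, which was removed in pandas 2.0; this record's code therefore only runs on pandas < 2.0 unless the loop is rewritten with pd.concat. A hedged usage sketch assuming the ak.index_kq_fz export and an older pandas:

import akshare as ak

# 价格指数 / 景气指数 / 外贸指数 select which column layout is applied to the paged result.
kq_df = ak.index_kq_fz(symbol="价格指数")
print(kq_df.head())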
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
_get_global_index_area_name_code
|
()
|
return name_code_list
|
全球指数-各国的全球指数数据
https://cn.investing.com/indices/global-indices?majorIndices=on&primarySectors=on&bonds=on&additionalIndices=on&otherIndices=on&c_id=37
:return: 国家和代码
:rtype: dict
|
全球指数-各国的全球指数数据
https://cn.investing.com/indices/global-indices?majorIndices=on&primarySectors=on&bonds=on&additionalIndices=on&otherIndices=on&c_id=37
:return: 国家和代码
:rtype: dict
| 19 | 53 |
def _get_global_index_area_name_code() -> dict:
"""
全球指数-各国的全球指数数据
https://cn.investing.com/indices/global-indices?majorIndices=on&primarySectors=on&bonds=on&additionalIndices=on&otherIndices=on&c_id=37
:return: 国家和代码
:rtype: dict
"""
url = "https://cn.investing.com/indices/global-indices"
params = {
"majorIndices": "on",
"primarySectors": "on",
"bonds": "on",
"additionalIndices": "on",
"otherIndices": "on",
}
r = session.get(url, params=params, headers=short_headers)
data_text = r.text
soup = BeautifulSoup(data_text, "lxml")
name_url_option_list = soup.find_all("option")[1:]
url_list = [
item["value"]
for item in name_url_option_list
if "c_id" in item["value"]
]
url_list_code = [
item["value"].split("?")[1].split("=")[1]
for item in name_url_option_list
if "c_id" in item["value"]
]
name_list = [item.get_text() for item in name_url_option_list][
: len(url_list)
]
_temp_df = pd.DataFrame([name_list, url_list_code]).T
name_code_list = dict(zip(_temp_df.iloc[:, 0], _temp_df.iloc[:, 1]))
return name_code_list
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L19-L53
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 20 |
[
7,
8,
15,
16,
17,
18,
19,
24,
29,
32,
33,
34
] | 34.285714 | false | 12.820513 | 35 | 4 | 65.714286 | 4 |
def _get_global_index_area_name_code() -> dict:
url = "https://cn.investing.com/indices/global-indices"
params = {
"majorIndices": "on",
"primarySectors": "on",
"bonds": "on",
"additionalIndices": "on",
"otherIndices": "on",
}
r = session.get(url, params=params, headers=short_headers)
data_text = r.text
soup = BeautifulSoup(data_text, "lxml")
name_url_option_list = soup.find_all("option")[1:]
url_list = [
item["value"]
for item in name_url_option_list
if "c_id" in item["value"]
]
url_list_code = [
item["value"].split("?")[1].split("=")[1]
for item in name_url_option_list
if "c_id" in item["value"]
]
name_list = [item.get_text() for item in name_url_option_list][
: len(url_list)
]
_temp_df = pd.DataFrame([name_list, url_list_code]).T
name_code_list = dict(zip(_temp_df.iloc[:, 0], _temp_df.iloc[:, 1]))
return name_code_list
| 18,246 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
_get_global_country_name_url
|
()
|
return name_code_map_dict
|
可获得指数数据国家对应的 URL
https://cn.investing.com/indices/
:return: 国家和 URL
:rtype: dict
|
可获得指数数据国家对应的 URL
https://cn.investing.com/indices/
:return: 国家和 URL
:rtype: dict
| 56 | 75 |
def _get_global_country_name_url() -> dict:
"""
可获得指数数据国家对应的 URL
https://cn.investing.com/indices/
:return: 国家和 URL
:rtype: dict
"""
url = "https://cn.investing.com/indices/"
res = session.post(url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
name_url_option_list = soup.find(
"select", attrs={"name": "country"}
).find_all("option")[
1:
] # 去掉-所有国家及地区
url_list = [item["value"] for item in name_url_option_list]
name_list = [item.get_text() for item in name_url_option_list]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, url_list))
return name_code_map_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L56-L75
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 35 |
[
7,
8,
9,
10,
15,
16,
17,
18,
19
] | 45 | false | 12.820513 | 20 | 3 | 55 | 4 |
def _get_global_country_name_url() -> dict:
url = "https://cn.investing.com/indices/"
res = session.post(url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
name_url_option_list = soup.find(
"select", attrs={"name": "country"}
).find_all("option")[
1:
] # 去掉-所有国家及地区
url_list = [item["value"] for item in name_url_option_list]
name_list = [item.get_text() for item in name_url_option_list]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, url_list))
return name_code_map_dict
| 18,247 |
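_get_global_index_area_name_code (previous record) and _get_global_country_name_url here are private scraper helpers used by the public index_investing_* functions; both rely on a module-level session and short_headers and on cn.investing.com answering without an anti-bot challenge. A hedged direct-call sketch, importing from the module path given in these records:

from akshare.index.index_investing import _get_global_country_name_url

# Maps Chinese country/area names to relative paths that the public wrappers
# append to https://cn.investing.com (the <option value> hrefs of the country selector).
country_url_map = _get_global_country_name_url()
print(list(country_url_map.items())[:5])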
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
index_investing_global_area_index_name_code
|
(area: str = "中国") -> dict
|
return name_code_map_dict
|
指定 area 的所有指数和代码
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和代码
:rtype: dict
|
指定 area 的所有指数和代码
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和代码
:rtype: dict
| 78 | 105 |
def index_investing_global_area_index_name_code(area: str = "中国") -> dict:
"""
指定 area 的所有指数和代码
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和代码
:rtype: dict
"""
scraper = cfscrape.create_scraper(delay=10)
pd.set_option("mode.chained_assignment", None)
name_url_dict = _get_global_country_name_url()
url = f"https://cn.investing.com{name_url_dict[area]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item["data-id"]
for item in soup.find_all("table")[1].find_all(
"span", attrs={"class": "alertBellGrayPlus"}
)
]
name_list = [
item.find("a").text
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, code_list))
return name_code_map_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L78-L105
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 32.142857 |
[
9,
10,
11,
12,
13,
14,
15,
21,
25,
26,
27
] | 39.285714 | false | 12.820513 | 28 | 3 | 60.714286 | 6 |
def index_investing_global_area_index_name_code(area: str = "中国") -> dict:
scraper = cfscrape.create_scraper(delay=10)
pd.set_option("mode.chained_assignment", None)
name_url_dict = _get_global_country_name_url()
url = f"https://cn.investing.com{name_url_dict[area]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item["data-id"]
for item in soup.find_all("table")[1].find_all(
"span", attrs={"class": "alertBellGrayPlus"}
)
]
name_list = [
item.find("a").text
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, code_list))
return name_code_map_dict
| 18,248 |
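index_investing_global_area_index_name_code resolves one area's index display names to investing.com numeric ids (the data-id attributes); the *_name_url variant in the next record is identical except it collects the <a href> paths instead. A hedged sketch, assuming the package-level export and that cfscrape gets past investing.com's challenge page:

import akshare as ak

# Display name -> numeric id pairs for all indices listed under the area "中国".
cn_indices = ak.index_investing_global_area_index_name_code(area="中国")
print(len(cn_indices), list(cn_indices)[:5])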
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
index_investing_global_area_index_name_url
|
(area: str = "中国") -> dict
|
return name_code_map_dict
|
指定 area 的所有指数和 URL 地址
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和 URL 地址
:rtype: dict
|
指定 area 的所有指数和 URL 地址
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和 URL 地址
:rtype: dict
| 108 | 133 |
def index_investing_global_area_index_name_url(area: str = "中国") -> dict:
"""
指定 area 的所有指数和 URL 地址
https://cn.investing.com/indices/
:param area: 指定的国家或地区;ak._get_global_country_name_url() 函数返回的国家或地区的名称
:type area: str
:return: 指定 area 的所有指数和 URL 地址
:rtype: dict
"""
scraper = cfscrape.create_scraper(delay=10)
pd.set_option("mode.chained_assignment", None)
name_url_dict = _get_global_country_name_url()
url = f"https://cn.investing.com{name_url_dict[area]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item.find("a")["href"]
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_list = [
item.find("a").text
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, code_list))
return name_code_map_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L108-L133
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 34.615385 |
[
9,
10,
11,
12,
13,
14,
15,
19,
23,
24,
25
] | 42.307692 | false | 12.820513 | 26 | 3 | 57.692308 | 6 |
def index_investing_global_area_index_name_url(area: str = "中国") -> dict:
scraper = cfscrape.create_scraper(delay=10)
pd.set_option("mode.chained_assignment", None)
name_url_dict = _get_global_country_name_url()
url = f"https://cn.investing.com{name_url_dict[area]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item.find("a")["href"]
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_list = [
item.find("a").text
for item in soup.find_all("td", attrs={"class": "plusIconTd"})
]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, code_list))
return name_code_map_dict
| 18,249 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
index_investing_global
|
(
area: str = "中国",
symbol: str = "上证指数",
period: str = "每日",
start_date: str = "20100101",
end_date: str = "20211031",
)
|
return df_data
|
具体国家或地区的从 start_date 到 end_date 期间的数据
https://cn.investing.com/indices/ftse-epra-nareit-hong-kong-historical-data
:param area: 对应函数中的国家或地区名称
:type area: str
:param symbol: 对应函数中的指数名称
:type symbol: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
|
具体国家或地区的从 start_date 到 end_date 期间的数据
https://cn.investing.com/indices/ftse-epra-nareit-hong-kong-historical-data
:param area: 对应函数中的国家或地区名称
:type area: str
:param symbol: 对应函数中的指数名称
:type symbol: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
| 136 | 222 |
def index_investing_global(
area: str = "中国",
symbol: str = "上证指数",
period: str = "每日",
start_date: str = "20100101",
end_date: str = "20211031",
) -> pd.DataFrame:
"""
具体国家或地区的从 start_date 到 end_date 期间的数据
https://cn.investing.com/indices/ftse-epra-nareit-hong-kong-historical-data
:param area: 对应函数中的国家或地区名称
:type area: str
:param symbol: 对应函数中的指数名称
:type symbol: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
name_code_dict = index_investing_global_area_index_name_code(area)
url = f"https://api.investing.com/api/financialdata/historical/{name_code_dict[symbol]}"
params = {
"start-date": start_date,
"end-date": end_date,
"time-frame": period_map[period],
"add-missing-rows": "false",
}
headers = {
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"domain-id": "cn",
"origin": "https://cn.investing.com",
"pragma": "no-cache",
"referer": "https://cn.investing.com/",
"sec-ch-ua": '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NjM2NjQ1NzUsImp0aSI6IjIyODA4MDM5MSIsImlhdCI6MTY2MzY2MDk3NSwiaXNzIjoiaW52ZXN0aW5nLmNvbSIsInVzZXJfaWQiOjIyODA4MDM5MSwicHJpbWFyeV9kb21haW5faWQiOiIxIiwiQXV0aG5TeXN0ZW1Ub2tlbiI6IiIsIkF1dGhuU2Vzc2lvblRva2VuIjoiIiwiRGV2aWNlVG9rZW4iOiIiLCJVYXBpVG9rZW4iOiJObmclMkJmMlJyUHpjeWRtdHRaell5TW1JN1pUNWliV1prTURJMVB6czlNeVUySWpVN1lEYzNjV1ZxYWlSZ1kyVjVNamRsWWpRMFptWTFQMkk4TnpCdlBEWXlQbVJrWXo4M01tQnJaMmN3TW1aaU1HVm9ZbWRtWmpBNU5UWTdhRE0lMkJOalUxTW1Cdk56VmxPbW93WUR4bGJUSWdaWGswY0daM05XZGlNamQyYnlnMk9UNSUyRlpEUSUyRllESm1hMjluTURJeFlqRmxQV0l3Wmpjd1pUVXhPenN6S3paOSIsIkF1dGhuSWQiOiIiLCJJc0RvdWJsZUVuY3J5cHRlZCI6ZmFsc2UsIkRldmljZUlkIjoiIiwiUmVmcmVzaEV4cGlyZWRBdCI6MTY2NjE4MDk3NX0.uRLTP1IG3696uxHm3Qq0D8z4o3nfsD3CaIS9cZGjsV0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36",
}
scraper = cfscrape.create_scraper(delay=10)
r = scraper.get(url, params=params, headers=headers)
r.encoding = "utf-8"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
df_data = pd.DataFrame(data_json["data"])
df_data.columns = [
"-",
"-",
"-",
"日期",
"-",
"-",
"-",
"-",
"-",
"交易量",
"-",
"收盘",
"开盘",
"高",
"低",
"涨跌幅",
]
df_data = df_data[["日期", "收盘", "开盘", "高", "低", "交易量", "涨跌幅"]]
df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
df_data["收盘"] = pd.to_numeric(df_data["收盘"])
df_data["开盘"] = pd.to_numeric(df_data["开盘"])
df_data["高"] = pd.to_numeric(df_data["高"])
df_data["低"] = pd.to_numeric(df_data["低"])
df_data["交易量"] = pd.to_numeric(df_data["交易量"])
df_data["涨跌幅"] = pd.to_numeric(df_data["涨跌幅"])
df_data.sort_values("日期", inplace=True)
df_data.reset_index(inplace=True, drop=True)
return df_data
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L136-L222
| 25 |
[
0
] | 1.149425 |
[
23,
24,
25,
26,
27,
28,
34,
52,
53,
54,
55,
56,
57,
58,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86
] | 28.735632 | false | 12.820513 | 87 | 1 | 71.264368 | 14 |
def index_investing_global(
area: str = "中国",
symbol: str = "上证指数",
period: str = "每日",
start_date: str = "20100101",
end_date: str = "20211031",
) -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
name_code_dict = index_investing_global_area_index_name_code(area)
url = f"https://api.investing.com/api/financialdata/historical/{name_code_dict[symbol]}"
params = {
"start-date": start_date,
"end-date": end_date,
"time-frame": period_map[period],
"add-missing-rows": "false",
}
headers = {
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"domain-id": "cn",
"origin": "https://cn.investing.com",
"pragma": "no-cache",
"referer": "https://cn.investing.com/",
"sec-ch-ua": '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NjM2NjQ1NzUsImp0aSI6IjIyODA4MDM5MSIsImlhdCI6MTY2MzY2MDk3NSwiaXNzIjoiaW52ZXN0aW5nLmNvbSIsInVzZXJfaWQiOjIyODA4MDM5MSwicHJpbWFyeV9kb21haW5faWQiOiIxIiwiQXV0aG5TeXN0ZW1Ub2tlbiI6IiIsIkF1dGhuU2Vzc2lvblRva2VuIjoiIiwiRGV2aWNlVG9rZW4iOiIiLCJVYXBpVG9rZW4iOiJObmclMkJmMlJyUHpjeWRtdHRaell5TW1JN1pUNWliV1prTURJMVB6czlNeVUySWpVN1lEYzNjV1ZxYWlSZ1kyVjVNamRsWWpRMFptWTFQMkk4TnpCdlBEWXlQbVJrWXo4M01tQnJaMmN3TW1aaU1HVm9ZbWRtWmpBNU5UWTdhRE0lMkJOalUxTW1Cdk56VmxPbW93WUR4bGJUSWdaWGswY0daM05XZGlNamQyYnlnMk9UNSUyRlpEUSUyRllESm1hMjluTURJeFlqRmxQV0l3Wmpjd1pUVXhPenN6S3paOSIsIkF1dGhuSWQiOiIiLCJJc0RvdWJsZUVuY3J5cHRlZCI6ZmFsc2UsIkRldmljZUlkIjoiIiwiUmVmcmVzaEV4cGlyZWRBdCI6MTY2NjE4MDk3NX0.uRLTP1IG3696uxHm3Qq0D8z4o3nfsD3CaIS9cZGjsV0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36",
}
    scraper = cfscrape.create_scraper(delay=10)
    r = scraper.get(url, params=params, headers=headers)
    r.encoding = "utf-8"
data_json = r.json()
df_data = pd.DataFrame(data_json["data"])
df_data.columns = [
"-",
"-",
"-",
"日期",
"-",
"-",
"-",
"-",
"-",
"交易量",
"-",
"收盘",
"开盘",
"高",
"低",
"涨跌幅",
]
df_data = df_data[["日期", "收盘", "开盘", "高", "低", "交易量", "涨跌幅"]]
df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
df_data["收盘"] = pd.to_numeric(df_data["收盘"])
df_data["开盘"] = pd.to_numeric(df_data["开盘"])
df_data["高"] = pd.to_numeric(df_data["高"])
df_data["低"] = pd.to_numeric(df_data["低"])
df_data["交易量"] = pd.to_numeric(df_data["交易量"])
df_data["涨跌幅"] = pd.to_numeric(df_data["涨跌幅"])
df_data.sort_values("日期", inplace=True)
df_data.reset_index(inplace=True, drop=True)
return df_data
| 18,250 |
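A minimal usage sketch for the index_investing_global record above. It assumes network access to api.investing.com and that the hard-coded bearer token in the request headers is still accepted; neither is guaranteed by the source, so treat it as illustrative only.

from akshare.index.index_investing import index_investing_global

# Daily history for "上证指数" in the "中国" area, mirroring the defaults in the signature above.
df = index_investing_global(
    area="中国",
    symbol="上证指数",
    period="每日",
    start_date="20100101",
    end_date="20211031",
)
print(df.head())  # columns: 日期, 收盘, 开盘, 高, 低, 交易量, 涨跌幅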
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_investing.py
|
index_investing_global_from_url
|
(
url: str = "https://www.investing.com/indices/ftse-epra-nareit-eurozone",
period: str = "每日",
start_date: str = "20000101",
end_date: str = "20220808",
)
|
return df_data
|
获得具体指数的从 start_date 到 end_date 期间的数据
https://www.investing.com/indices/ftse-epra-nareit-eurozone
:param url: 具体数据链接
:type url: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
|
获得具体指数的从 start_date 到 end_date 期间的数据
https://www.investing.com/indices/ftse-epra-nareit-eurozone
:param url: 具体数据链接
:type url: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
| 225 | 300 |
def index_investing_global_from_url(
url: str = "https://www.investing.com/indices/ftse-epra-nareit-eurozone",
period: str = "每日",
start_date: str = "20000101",
end_date: str = "20220808",
) -> pd.DataFrame:
"""
获得具体指数的从 start_date 到 end_date 期间的数据
https://www.investing.com/indices/ftse-epra-nareit-eurozone
:param url: 具体数据链接
:type url: str
:param period: choice of {"每日", "每周", "每月"}
:type period: str
:param start_date: '20000101', 注意格式
:type start_date: str
:param end_date: '20191017', 注意格式
:type end_date: str
:return: 指定参数的数据
:rtype: pandas.DataFrame
"""
url = url.replace("www", "cn")
scraper = cfscrape.create_scraper(delay=10)
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("script", attrs={"id": "__NEXT_DATA__"}).text
data_json = json.loads(data_text)
code = json.loads(data_json["props"]["pageProps"]["state"])["dataStore"][
"pageInfoStore"
]["identifiers"]["instrument_id"]
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
url = f"https://api.investing.com/api/financialdata/historical/{code}"
params = {
"start-date": start_date,
"end-date": end_date,
"time-frame": period_map[period],
"add-missing-rows": "false",
}
headers = {
"domain-id": "cn",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
df_data = pd.DataFrame(data_json["data"])
df_data.columns = [
"-",
"-",
"-",
"日期",
"-",
"-",
"-",
"-",
"-",
"交易量",
"-",
"收盘",
"开盘",
"高",
"低",
"涨跌幅",
]
df_data = df_data[["日期", "收盘", "开盘", "高", "低", "交易量", "涨跌幅"]]
df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
df_data["收盘"] = pd.to_numeric(df_data["收盘"])
df_data["开盘"] = pd.to_numeric(df_data["开盘"])
df_data["高"] = pd.to_numeric(df_data["高"])
df_data["低"] = pd.to_numeric(df_data["低"])
df_data["交易量"] = pd.to_numeric(df_data["交易量"])
df_data["涨跌幅"] = pd.to_numeric(df_data["涨跌幅"])
df_data.sort_values("日期", inplace=True)
df_data.reset_index(inplace=True, drop=True)
return df_data
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_investing.py#L225-L300
| 25 |
[
0
] | 1.315789 |
[
20,
21,
22,
23,
24,
25,
26,
29,
30,
31,
32,
33,
39,
44,
45,
46,
47,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75
] | 36.842105 | false | 12.820513 | 76 | 1 | 63.157895 | 12 |
def index_investing_global_from_url(
url: str = "https://www.investing.com/indices/ftse-epra-nareit-eurozone",
period: str = "每日",
start_date: str = "20000101",
end_date: str = "20220808",
) -> pd.DataFrame:
url = url.replace("www", "cn")
scraper = cfscrape.create_scraper(delay=10)
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("script", attrs={"id": "__NEXT_DATA__"}).text
data_json = json.loads(data_text)
code = json.loads(data_json["props"]["pageProps"]["state"])["dataStore"][
"pageInfoStore"
]["identifiers"]["instrument_id"]
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
url = f"https://api.investing.com/api/financialdata/historical/{code}"
params = {
"start-date": start_date,
"end-date": end_date,
"time-frame": period_map[period],
"add-missing-rows": "false",
}
headers = {
"domain-id": "cn",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
df_data = pd.DataFrame(data_json["data"])
df_data.columns = [
"-",
"-",
"-",
"日期",
"-",
"-",
"-",
"-",
"-",
"交易量",
"-",
"收盘",
"开盘",
"高",
"低",
"涨跌幅",
]
df_data = df_data[["日期", "收盘", "开盘", "高", "低", "交易量", "涨跌幅"]]
df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
df_data["收盘"] = pd.to_numeric(df_data["收盘"])
df_data["开盘"] = pd.to_numeric(df_data["开盘"])
df_data["高"] = pd.to_numeric(df_data["高"])
df_data["低"] = pd.to_numeric(df_data["低"])
df_data["交易量"] = pd.to_numeric(df_data["交易量"])
df_data["涨跌幅"] = pd.to_numeric(df_data["涨跌幅"])
df_data.sort_values("日期", inplace=True)
df_data.reset_index(inplace=True, drop=True)
return df_data
| 18,251 |
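A comparable sketch for index_investing_global_from_url, which resolves the instrument id from the page itself before calling the historical-data API. It assumes the cn.investing.com page still embeds the __NEXT_DATA__ script tag that the function parses; the site layout can change at any time.

from akshare.index.index_investing import index_investing_global_from_url

# Daily history for the index behind the default URL (ftse-epra-nareit-eurozone).
df = index_investing_global_from_url(
    url="https://www.investing.com/indices/ftse-epra-nareit-eurozone",
    period="每日",
    start_date="20000101",
    end_date="20220808",
)
print(df.tail())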
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_baidu.py
|
decrypt
|
(t: str, e: str)
|
return "".join([a[j] for j in e])
|
解密函数
:param t: 加密字符
:type t: str
:param e: 加密字符
:type e: str
:return: 解密后字符
:rtype: str
|
解密函数
:param t: 加密字符
:type t: str
:param e: 加密字符
:type e: str
:return: 解密后字符
:rtype: str
| 12 | 26 |
def decrypt(t: str, e: str) -> str:
"""
解密函数
:param t: 加密字符
:type t: str
:param e: 加密字符
:type e: str
:return: 解密后字符
:rtype: str
"""
    n = list(t)
ln = int(len(n) / 2)
start, end = n[ln:], n[:ln]
a = dict(zip(end, start))
return "".join([a[j] for j in e])
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_baidu.py#L12-L26
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 66.666667 |
[
10,
11,
12,
13,
14
] | 33.333333 | false | 8.181818 | 15 | 2 | 66.666667 | 7 |
def decrypt(t: str, e: str) -> str:
    n = list(t)
ln = int(len(n) / 2)
start, end = n[ln:], n[:ln]
a = dict(zip(end, start))
return "".join([a[j] for j in e])
| 18,252 |
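A small self-contained check of the decrypt mapping above (no network needed). The key string t is split in half: the first half are cipher characters, the second half the plain characters they map to, and e is decoded character by character. The sample strings are made up purely for illustration.

from akshare.index.index_baidu import decrypt

# t = "ab12" maps 'a' -> '1' and 'b' -> '2', so "ba" decodes to "21".
assert decrypt("ab12", "ba") == "21"

# A Baidu-Index-style payload decodes to a comma-separated list of numbers.
assert decrypt("xy,z78,9", "xx,zz") == "77,99"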
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_baidu.py
|
get_ptbk
|
(uniqid: str, cookie: str)
|
获取编码
:param uniqid: 传入 uniqid
:type uniqid: str
:param cookie: 传入 cookie
:type cookie: str
:return: 编码
:rtype: str
|
获取编码
:param uniqid: 传入 uniqid
:type uniqid: str
:param cookie: 传入 cookie
:type cookie: str
:return: 编码
:rtype: str
| 29 | 59 |
def get_ptbk(uniqid: str, cookie: str) -> str:
"""
获取编码
:param uniqid: 传入 uniqid
:type uniqid: str
:param cookie: 传入 cookie
:type cookie: str
:return: 编码
:rtype: str
"""
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
with session.get(
url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
) as response:
ptbk = response.json()["data"]
return ptbk
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_baidu.py#L29-L59
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 32.258065 |
[
10,
24,
25,
26,
29,
30
] | 19.354839 | false | 8.181818 | 31 | 2 | 80.645161 | 7 |
def get_ptbk(uniqid: str, cookie: str) -> str:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
with session.get(
url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
) as response:
ptbk = response.json()["data"]
return ptbk
| 18,253 |
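A hedged sketch of how get_ptbk is meant to be combined with decrypt: the SearchApi response carries an encrypted series plus a uniqid, the uniqid is exchanged for the key (ptbk), and decrypt recovers the comma-separated values. The placeholder values below are assumptions; a real cookie from a logged-in index.baidu.com session and the matching response fields are required.

from akshare.index.index_baidu import decrypt, get_ptbk

cookie = "<your index.baidu.com cookie>"  # placeholder, not a working cookie
uniqid = "<uniqid from the SearchApi response>"  # placeholder
all_data = "<encrypted 'all' series from the same response>"  # placeholder

ptbk = get_ptbk(uniqid, cookie)
values = [int(v) if v != "" else 0 for v in decrypt(ptbk, all_data).split(",")]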
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_baidu.py
|
baidu_search_index
|
(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-05-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
)
|
百度-搜索指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 搜索指数
:rtype: pandas.Series
|
百度-搜索指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 搜索指数
:rtype: pandas.Series
| 62 | 519 |
def baidu_search_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-05-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> str:
"""
百度-搜索指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 搜索指数
:rtype: pandas.Series
"""
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Referer": "https://index.baidu.com/v2/main/index.html",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url="http://index.baidu.com/api/SearchApi/index", params=params
) as response:
data = response.json()["data"]
all_data = data["userIndexes"][0]["all"]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_baidu.py#L62-L519
| 25 |
[
0
] | 0.218341 |
[
29,
393,
394,
396,
397,
398,
400,
401,
417,
418,
419,
425,
428,
429,
430,
431,
432,
433,
434,
437,
444,
445,
446,
448,
455,
456,
457
] | 5.895197 | false | 8.181818 | 458 | 8 | 94.104803 | 18 |
def baidu_search_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-05-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> str:
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Referer": "https://index.baidu.com/v2/main/index.html",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url="http://index.baidu.com/api/SearchApi/index", params=params
) as response:
data = response.json()["data"]
all_data = data["userIndexes"][0]["all"]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
| 18,254 |
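A usage sketch for baidu_search_index. It assumes the caller supplies a valid cookie (and, where the deployment requires it, the Cipher-Text value via the text parameter) copied from a logged-in index.baidu.com session; without them the API returns nothing useful.

from akshare.index.index_baidu import baidu_search_index

search_df = baidu_search_index(
    word="python",
    start_date="2020-01-01",
    end_date="2020-05-01",
    province="四川",
    city="成都",
    cookie="<your index.baidu.com cookie>",  # assumption: supplied by the caller
)
print(search_df.head())  # a single column named after the word, indexed by date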
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_baidu.py
|
baidu_info_index
|
(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-06-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
)
|
百度-资讯指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 资讯指数
:rtype: pandas.Series
|
百度-资讯指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 资讯指数
:rtype: pandas.Series
| 522 | 979 |
def baidu_info_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-06-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> str:
"""
百度-资讯指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 资讯指数
:rtype: pandas.Series
"""
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url=f"http://index.baidu.com/api/FeedSearchApi/getFeedIndex",
params=params,
) as response:
data = response.json()["data"]
all_data = data["index"][0]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_baidu.py#L522-L979
| 25 |
[
0
] | 0.218341 |
[
29,
393,
394,
396,
397,
398,
400,
401,
416,
417,
418,
424,
428,
429,
430,
431,
432,
433,
434,
437,
444,
445,
446,
448,
455,
456,
457
] | 5.895197 | false | 8.181818 | 458 | 8 | 94.104803 | 18 |
def baidu_info_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-06-01",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> str:
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url=f"http://index.baidu.com/api/FeedSearchApi/getFeedIndex",
params=params,
) as response:
data = response.json()["data"]
all_data = data["index"][0]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = [int(item) if item != "" else 0 for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
| 18,255 |
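baidu_info_index follows the same calling convention as baidu_search_index but hits the FeedSearchApi endpoint, so a natural use is to pull both series for one word and align them by date. A hedged sketch, with the same cookie caveats as above:

import pandas as pd

from akshare.index.index_baidu import baidu_info_index, baidu_search_index

cookie = "<your index.baidu.com cookie>"  # assumption: supplied by the caller
kwargs = dict(word="python", start_date="2020-01-01", end_date="2020-05-01", cookie=cookie)

combined = pd.concat(
    [
        baidu_search_index(**kwargs).rename(columns={"python": "search"}),
        baidu_info_index(**kwargs).rename(columns={"python": "info"}),
    ],
    axis=1,
)
print(combined.head())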
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_baidu.py
|
baidu_media_index
|
(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-04-20",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
)
|
百度-媒体指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 媒体指数
:rtype: pandas.Series
|
百度-媒体指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 媒体指数
:rtype: pandas.Series
| 982 | 1,439 |
def baidu_media_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-04-20",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> str:
"""
百度-媒体指数
https://index.baidu.com/v2/index.html
:param word: 需要搜索的词语
:type word: str
:param start_date: 开始时间;注意开始时间和结束时间不要超过一年
:type start_date: str
:param end_date: 结束时间;注意开始时间和结束时间不要超过一年
:type end_date: str
:param province: 省份, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`四川`
:type province: str
:param city: 城市, 默认为 `全国`; 请按照百度指数提供的名称进行输入, 比如:`成都`
:type city: str
:param cookie: 输入 cookie
:type cookie: str
:param text: 输入 text
:type text: str
:return: 媒体指数
:rtype: pandas.Series
"""
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url=f"http://index.baidu.com/api/NewsApi/getNewsIndex", params=params
) as response:
data = response.json()["data"]
all_data = data["index"][0]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = ["0" if item == "" else item for item in result]
result = [int(item) for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_baidu.py#L982-L1439
| 25 |
[
0
] | 0.218341 |
[
29,
393,
394,
396,
397,
398,
400,
401,
416,
417,
418,
424,
427,
428,
429,
430,
431,
432,
433,
434,
437,
444,
445,
446,
448,
455,
456,
457
] | 6.113537 | false | 8.181818 | 458 | 9 | 93.886463 | 18 |
def baidu_media_index(
word: str = "python",
start_date: str = "2020-01-01",
end_date: str = "2020-04-20",
province: str = "全国",
city: str = "全国",
cookie: str = None,
text: str = None,
) -> pd.DataFrame:
baidu_area_map = {
("广东", "广州", "95"),
("广东", "深圳", "94"),
("广东", "东莞", "133"),
("广东", "云浮", "195"),
("广东", "佛山", "196"),
("广东", "湛江", "197"),
("广东", "江门", "198"),
("广东", "惠州", "199"),
("广东", "珠海", "200"),
("广东", "韶关", "201"),
("广东", "阳江", "202"),
("广东", "茂名", "203"),
("广东", "潮州", "204"),
("广东", "揭阳", "205"),
("广东", "中山", "207"),
("广东", "清远", "208"),
("广东", "肇庆", "209"),
("广东", "河源", "210"),
("广东", "梅州", "211"),
("广东", "汕头", "212"),
("广东", "汕尾", "213"),
("河南", "郑州", "168"),
("河南", "南阳", "262"),
("河南", "新乡", "263"),
("河南", "开封", "264"),
("河南", "焦作", "265"),
("河南", "平顶山", "266"),
("河南", "许昌", "268"),
("河南", "安阳", "370"),
("河南", "驻马店", "371"),
("河南", "信阳", "373"),
("河南", "鹤壁", "374"),
("河南", "周口", "375"),
("河南", "商丘", "376"),
("河南", "洛阳", "378"),
("河南", "漯河", "379"),
("河南", "濮阳", "380"),
("河南", "三门峡", "381"),
("河南", "济源", "667"),
("四川", "成都", "97"),
("四川", "宜宾", "96"),
("四川", "绵阳", "98"),
("四川", "广元", "99"),
("四川", "遂宁", "100"),
("四川", "巴中", "101"),
("四川", "内江", "102"),
("四川", "泸州", "103"),
("四川", "南充", "104"),
("四川", "德阳", "106"),
("四川", "乐山", "107"),
("四川", "广安", "108"),
("四川", "资阳", "109"),
("四川", "自贡", "111"),
("四川", "攀枝花", "112"),
("四川", "达州", "113"),
("四川", "雅安", "114"),
("四川", "眉山", "291"),
("四川", "甘孜", "417"),
("四川", "阿坝", "457"),
("四川", "凉山", "479"),
("江苏", "南京", "125"),
("江苏", "苏州", "126"),
("江苏", "无锡", "127"),
("江苏", "连云港", "156"),
("江苏", "淮安", "157"),
("江苏", "扬州", "158"),
("江苏", "泰州", "159"),
("江苏", "盐城", "160"),
("江苏", "徐州", "161"),
("江苏", "常州", "162"),
("江苏", "南通", "163"),
("江苏", "镇江", "169"),
("江苏", "宿迁", "172"),
("湖北", "武汉", "28"),
("湖北", "黄石", "30"),
("湖北", "荆州", "31"),
("湖北", "襄阳", "32"),
("湖北", "黄冈", "33"),
("湖北", "荆门", "34"),
("湖北", "宜昌", "35"),
("湖北", "十堰", "36"),
("湖北", "随州", "37"),
("湖北", "恩施", "38"),
("湖北", "鄂州", "39"),
("湖北", "咸宁", "40"),
("湖北", "孝感", "41"),
("湖北", "仙桃", "42"),
("湖北", "天门", "73"),
("湖北", "潜江", "74"),
("湖北", "神农架", "687"),
("浙江", "杭州", "138"),
("浙江", "丽水", "134"),
("浙江", "金华", "135"),
("浙江", "温州", "149"),
("浙江", "台州", "287"),
("浙江", "衢州", "288"),
("浙江", "宁波", "289"),
("浙江", "绍兴", "303"),
("浙江", "嘉兴", "304"),
("浙江", "湖州", "305"),
("浙江", "舟山", "306"),
("福建", "福州", "50"),
("福建", "莆田", "51"),
("福建", "三明", "52"),
("福建", "龙岩", "53"),
("福建", "厦门", "54"),
("福建", "泉州", "55"),
("福建", "漳州", "56"),
("福建", "宁德", "87"),
("福建", "南平", "253"),
("黑龙江", "哈尔滨", "152"),
("黑龙江", "大庆", "153"),
("黑龙江", "伊春", "295"),
("黑龙江", "大兴安岭", "297"),
("黑龙江", "黑河", "300"),
("黑龙江", "鹤岗", "301"),
("黑龙江", "七台河", "302"),
("黑龙江", "齐齐哈尔", "319"),
("黑龙江", "佳木斯", "320"),
("黑龙江", "牡丹江", "322"),
("黑龙江", "鸡西", "323"),
("黑龙江", "绥化", "324"),
("黑龙江", "双鸭山", "359"),
("山东", "济南", "1"),
("山东", "滨州", "76"),
("山东", "青岛", "77"),
("山东", "烟台", "78"),
("山东", "临沂", "79"),
("山东", "潍坊", "80"),
("山东", "淄博", "81"),
("山东", "东营", "82"),
("山东", "聊城", "83"),
("山东", "菏泽", "84"),
("山东", "枣庄", "85"),
("山东", "德州", "86"),
("山东", "威海", "88"),
("山东", "济宁", "352"),
("山东", "泰安", "353"),
("山东", "莱芜", "356"),
("山东", "日照", "366"),
("陕西", "西安", "165"),
("陕西", "铜川", "271"),
("陕西", "安康", "272"),
("陕西", "宝鸡", "273"),
("陕西", "商洛", "274"),
("陕西", "渭南", "275"),
("陕西", "汉中", "276"),
("陕西", "咸阳", "277"),
("陕西", "榆林", "278"),
("陕西", "延安", "401"),
("河北", "石家庄", "141"),
("河北", "衡水", "143"),
("河北", "张家口", "144"),
("河北", "承德", "145"),
("河北", "秦皇岛", "146"),
("河北", "廊坊", "147"),
("河北", "沧州", "148"),
("河北", "保定", "259"),
("河北", "唐山", "261"),
("河北", "邯郸", "292"),
("河北", "邢台", "293"),
("辽宁", "沈阳", "150"),
("辽宁", "大连", "29"),
("辽宁", "盘锦", "151"),
("辽宁", "鞍山", "215"),
("辽宁", "朝阳", "216"),
("辽宁", "锦州", "217"),
("辽宁", "铁岭", "218"),
("辽宁", "丹东", "219"),
("辽宁", "本溪", "220"),
("辽宁", "营口", "221"),
("辽宁", "抚顺", "222"),
("辽宁", "阜新", "223"),
("辽宁", "辽阳", "224"),
("辽宁", "葫芦岛", "225"),
("吉林", "长春", "154"),
("吉林", "四平", "155"),
("吉林", "辽源", "191"),
("吉林", "松原", "194"),
("吉林", "吉林", "270"),
("吉林", "通化", "407"),
("吉林", "白山", "408"),
("吉林", "白城", "410"),
("吉林", "延边", "525"),
("云南", "昆明", "117"),
("云南", "玉溪", "123"),
("云南", "楚雄", "124"),
("云南", "大理", "334"),
("云南", "昭通", "335"),
("云南", "红河", "337"),
("云南", "曲靖", "339"),
("云南", "丽江", "342"),
("云南", "临沧", "350"),
("云南", "文山", "437"),
("云南", "保山", "438"),
("云南", "普洱", "666"),
("云南", "西双版纳", "668"),
("云南", "德宏", "669"),
("云南", "怒江", "671"),
("云南", "迪庆", "672"),
("新疆", "乌鲁木齐", "467"),
("新疆", "石河子", "280"),
("新疆", "吐鲁番", "310"),
("新疆", "昌吉", "311"),
("新疆", "哈密", "312"),
("新疆", "阿克苏", "315"),
("新疆", "克拉玛依", "317"),
("新疆", "博尔塔拉", "318"),
("新疆", "阿勒泰", "383"),
("新疆", "喀什", "384"),
("新疆", "和田", "386"),
("新疆", "巴音郭楞", "499"),
("新疆", "伊犁", "520"),
("新疆", "塔城", "563"),
("新疆", "克孜勒苏柯尔克孜", "653"),
("新疆", "五家渠", "661"),
("新疆", "阿拉尔", "692"),
("新疆", "图木舒克", "693"),
("广西", "南宁", "90"),
("广西", "柳州", "89"),
("广西", "桂林", "91"),
("广西", "贺州", "92"),
("广西", "贵港", "93"),
("广西", "玉林", "118"),
("广西", "河池", "119"),
("广西", "北海", "128"),
("广西", "钦州", "129"),
("广西", "防城港", "130"),
("广西", "百色", "131"),
("广西", "梧州", "132"),
("广西", "来宾", "506"),
("广西", "崇左", "665"),
("山西", "太原", "231"),
("山西", "大同", "227"),
("山西", "长治", "228"),
("山西", "忻州", "229"),
("山西", "晋中", "230"),
("山西", "临汾", "232"),
("山西", "运城", "233"),
("山西", "晋城", "234"),
("山西", "朔州", "235"),
("山西", "阳泉", "236"),
("山西", "吕梁", "237"),
("湖南", "长沙", "43"),
("湖南", "岳阳", "44"),
("湖南", "衡阳", "45"),
("湖南", "株洲", "46"),
("湖南", "湘潭", "47"),
("湖南", "益阳", "48"),
("湖南", "郴州", "49"),
("湖南", "湘西", "65"),
("湖南", "娄底", "66"),
("湖南", "怀化", "67"),
("湖南", "常德", "68"),
("湖南", "张家界", "226"),
("湖南", "永州", "269"),
("湖南", "邵阳", "405"),
("江西", "南昌", "5"),
("江西", "九江", "6"),
("江西", "鹰潭", "7"),
("江西", "抚州", "8"),
("江西", "上饶", "9"),
("江西", "赣州", "10"),
("江西", "吉安", "115"),
("江西", "萍乡", "136"),
("江西", "景德镇", "137"),
("江西", "新余", "246"),
("江西", "宜春", "256"),
("安徽", "合肥", "189"),
("安徽", "铜陵", "173"),
("安徽", "黄山", "174"),
("安徽", "池州", "175"),
("安徽", "宣城", "176"),
("安徽", "巢湖", "177"),
("安徽", "淮南", "178"),
("安徽", "宿州", "179"),
("安徽", "六安", "181"),
("安徽", "滁州", "182"),
("安徽", "淮北", "183"),
("安徽", "阜阳", "184"),
("安徽", "马鞍山", "185"),
("安徽", "安庆", "186"),
("安徽", "蚌埠", "187"),
("安徽", "芜湖", "188"),
("安徽", "亳州", "391"),
("内蒙古", "呼和浩特", "20"),
("内蒙古", "包头", "13"),
("内蒙古", "鄂尔多斯", "14"),
("内蒙古", "巴彦淖尔", "15"),
("内蒙古", "乌海", "16"),
("内蒙古", "阿拉善盟", "17"),
("内蒙古", "锡林郭勒盟", "19"),
("内蒙古", "赤峰", "21"),
("内蒙古", "通辽", "22"),
("内蒙古", "呼伦贝尔", "25"),
("内蒙古", "乌兰察布", "331"),
("内蒙古", "兴安盟", "333"),
("甘肃", "兰州", "166"),
("甘肃", "庆阳", "281"),
("甘肃", "定西", "282"),
("甘肃", "武威", "283"),
("甘肃", "酒泉", "284"),
("甘肃", "张掖", "285"),
("甘肃", "嘉峪关", "286"),
("甘肃", "平凉", "307"),
("甘肃", "天水", "308"),
("甘肃", "白银", "309"),
("甘肃", "金昌", "343"),
("甘肃", "陇南", "344"),
("甘肃", "临夏", "346"),
("甘肃", "甘南", "673"),
("海南", "海口", "239"),
("海南", "万宁", "241"),
("海南", "琼海", "242"),
("海南", "三亚", "243"),
("海南", "儋州", "244"),
("海南", "东方", "456"),
("海南", "五指山", "582"),
("海南", "文昌", "670"),
("海南", "陵水", "674"),
("海南", "澄迈", "675"),
("海南", "乐东", "679"),
("海南", "临高", "680"),
("海南", "定安", "681"),
("海南", "昌江", "683"),
("海南", "屯昌", "684"),
("海南", "保亭", "686"),
("海南", "白沙", "689"),
("海南", "琼中", "690"),
("贵州", "贵阳", "2"),
("贵州", "黔南", "3"),
("贵州", "六盘水", "4"),
("贵州", "遵义", "59"),
("贵州", "黔东南", "61"),
("贵州", "铜仁", "422"),
("贵州", "安顺", "424"),
("贵州", "毕节", "426"),
("贵州", "黔西南", "588"),
("宁夏", "银川", "140"),
("宁夏", "吴忠", "395"),
("宁夏", "固原", "396"),
("宁夏", "石嘴山", "472"),
("宁夏", "中卫", "480"),
("青海", "西宁", "139"),
("青海", "海西", "608"),
("青海", "海东", "652"),
("青海", "玉树", "659"),
("青海", "海南", "676"),
("青海", "海北", "682"),
("青海", "黄南", "685"),
("青海", "果洛", "688"),
("西藏", "拉萨", "466"),
("西藏", "日喀则", "516"),
("西藏", "那曲", "655"),
("西藏", "林芝", "656"),
("西藏", "山南", "677"),
("西藏", "昌都", "678"),
("西藏", "阿里", "691"),
("北京", "北京", "911"),
("上海", "上海", "910"),
("重庆", "重庆", "904"),
("天津", "天津", "923")
}
if province == "全国":
area = "0"
else:
result_list = [item for item in baidu_area_map if item[0] == province and item[1] == city]
if result_list:
area = result_list[0][2]
else:
raise "请按照百度指数的要求输入正确的省份和城市"
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cipher-Text": text,
"Cookie": cookie,
"DNT": "1",
"Host": "index.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "https://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
params = {
"area": area,
"word": '[[{"name":' + f'"{word}"' + ',"wordType"' + ":1}]]",
"startDate": start_date,
"endDate": end_date,
}
with session.get(
url=f"http://index.baidu.com/api/NewsApi/getNewsIndex", params=params
) as response:
data = response.json()["data"]
all_data = data["index"][0]["data"]
uniqid = data["uniqid"]
ptbk = get_ptbk(uniqid, cookie)
result = decrypt(ptbk, all_data).split(",")
result = ["0" if item == "" else item for item in result]
result = [int(item) for item in result]
if len(result) == len(
pd.date_range(start=start_date, end=end_date, freq="7D")
):
temp_df_7 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="7D"),
result,
],
index=["date", word],
).T
temp_df_7.index = pd.to_datetime(temp_df_7["date"])
del temp_df_7["date"]
return temp_df_7
else:
temp_df_1 = pd.DataFrame(
[
pd.date_range(start=start_date, end=end_date, freq="1D"),
result,
],
index=["date", word],
).T
temp_df_1.index = pd.to_datetime(temp_df_1["date"])
del temp_df_1["date"]
return temp_df_1
| 18,256 |
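A minimal usage sketch for the baidu_media_index record above, assuming the function is exposed at the package level as elsewhere in akshare; the cookie value is a placeholder that a real caller would copy from a logged-in index.baidu.com session, and the optional text (Cipher-Text header) argument is omitted here:

import akshare as ak

# placeholder credential, not a real cookie
my_cookie = "BAIDUID=...; BDUSS=..."

media_df = ak.baidu_media_index(
    word="python",
    start_date="2020-01-01",
    end_date="2020-04-20",
    province="四川",
    city="成都",
    cookie=my_cookie,
)
# the result is a DataFrame indexed by date with one column named after the searched word
print(media_df.head())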
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/drewry_index.py
|
drewry_wci_index
|
(symbol: str = "composite")
|
return temp_df
|
Drewry 集装箱指数
https://infogram.com/world-container-index-1h17493095xl4zj
:param symbol: choice of {"composite", "shanghai-rotterdam", "rotterdam-shanghai", "shanghai-los angeles", "los angeles-shanghai", "shanghai-genoa", "new york-rotterdam", "rotterdam-new york"}
:type symbol: str
:return: Drewry 集装箱指数
:rtype: pandas.DataFrame
|
Drewry 集装箱指数
https://infogram.com/world-container-index-1h17493095xl4zj
:param symbol: choice of {"composite", "shanghai-rotterdam", "rotterdam-shanghai", "shanghai-los angeles", "los angeles-shanghai", "shanghai-genoa", "new york-rotterdam", "rotterdam-new york"}
:type symbol: str
:return: Drewry 集装箱指数
:rtype: pandas.DataFrame
| 16 | 49 |
def drewry_wci_index(symbol: str = "composite") -> pd.DataFrame:
"""
Drewry 集装箱指数
https://infogram.com/world-container-index-1h17493095xl4zj
    :param symbol: choice of {"composite", "shanghai-rotterdam", "rotterdam-shanghai", "shanghai-los angeles", "los angeles-shanghai", "shanghai-genoa", "new york-rotterdam", "rotterdam-new york"}
    :type symbol: str
:return: Drewry 集装箱指数
:rtype: pandas.DataFrame
"""
symbol_map = {
"composite": 0,
"shanghai-rotterdam": 1,
"rotterdam-shanghai": 2,
"shanghai-los angeles": 3,
"los angeles-shanghai": 4,
"shanghai-genoa": 5,
"new york-rotterdam": 6,
"rotterdam-new york": 7,
}
url = "https://infogram.com/world-container-index-1h17493095xl4zj"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find_all("script")[-5].string.strip("window.infographicData=")[:-1]
data_json = demjson.decode(data_text)
temp_df = pd.DataFrame(data_json["elements"][2]["data"][symbol_map[symbol]])
temp_df = temp_df.iloc[1:, :]
temp_df.columns = ["date", "wci"]
day = temp_df["date"].str.split("-", expand=True).iloc[:, 0].str.strip()
month = temp_df["date"].str.split("-", expand=True).iloc[:, 1].str.strip()
year = temp_df["date"].str.split("-", expand=True).iloc[:, 2].str.strip()
temp_df["date"] = day + "-" + month + "-" + year
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["wci"] = pd.to_numeric(temp_df["wci"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/drewry_index.py#L16-L49
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 26.470588 |
[
9,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33
] | 47.058824 | false | 17.948718 | 34 | 1 | 52.941176 | 6 |
def drewry_wci_index(symbol: str = "composite") -> pd.DataFrame:
symbol_map = {
"composite": 0,
"shanghai-rotterdam": 1,
"rotterdam-shanghai": 2,
"shanghai-los angeles": 3,
"los angeles-shanghai": 4,
"shanghai-genoa": 5,
"new york-rotterdam": 6,
"rotterdam-new york": 7,
}
url = "https://infogram.com/world-container-index-1h17493095xl4zj"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find_all("script")[-5].string.strip("window.infographicData=")[:-1]
data_json = demjson.decode(data_text)
temp_df = pd.DataFrame(data_json["elements"][2]["data"][symbol_map[symbol]])
temp_df = temp_df.iloc[1:, :]
temp_df.columns = ["date", "wci"]
day = temp_df["date"].str.split("-", expand=True).iloc[:, 0].str.strip()
month = temp_df["date"].str.split("-", expand=True).iloc[:, 1].str.strip()
year = temp_df["date"].str.split("-", expand=True).iloc[:, 2].str.strip()
temp_df["date"] = day + "-" + month + "-" + year
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["wci"] = pd.to_numeric(temp_df["wci"], errors="coerce")
return temp_df
| 18,257 |
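A short usage sketch for drewry_wci_index, again assuming the package-level export used elsewhere in akshare; the symbol values are the keys of the symbol_map shown in the record:

import akshare as ak

wci_df = ak.drewry_wci_index(symbol="shanghai-rotterdam")
# two columns come back per the record: date and wci
print(wci_df.tail())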
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/exceptions.py
|
ResponseError.__init__
|
(self, message, response)
| 4 | 8 |
def __init__(self, message, response):
super(Exception, self).__init__(message)
# pass response so it can be handled upstream
self.response = response
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/exceptions.py#L4-L8
| 25 |
[
0
] | 20 |
[
1,
4
] | 40 | false | 50 | 5 | 1 | 60 | 0 |
def __init__(self, message, response):
super(Exception, self).__init__(message)
# pass response so it can be handled upstream
self.response = response
| 18,258 |
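The constructor above attaches the offending response so it can be inspected upstream; a hypothetical handling sketch follows, assuming ResponseError derives from Exception (as its super().__init__ call suggests) and that the module path in the record is importable. The fetch_page helper and the raising site are invented for illustration only:

import requests

from akshare.index.exceptions import ResponseError

def fetch_page(url: str) -> dict:
    # assumed call site that raises ResponseError on a bad status
    r = requests.get(url)
    if r.status_code != 200:
        raise ResponseError("unexpected status code", response=r)
    return r.json()

try:
    fetch_page("https://example.com/index.json")
except ResponseError as err:
    # the attached response travels with the exception for upstream handling
    print(err.response.status_code)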
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_pmi_com_cx
|
()
|
return temp_df
|
财新数据-指数报告-财新中国 PMI-综合 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-综合 PMI
:rtype: pandas.DataFrame
|
财新数据-指数报告-财新中国 PMI-综合 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-综合 PMI
:rtype: pandas.DataFrame
| 12 | 33 |
def index_pmi_com_cx() -> pd.DataFrame:
"""
财新数据-指数报告-财新中国 PMI-综合 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-综合 PMI
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "com"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "综合PMI", "日期"]
temp_df = temp_df[
[
"日期",
"综合PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L12-L33
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_pmi_com_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "com"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "综合PMI", "日期"]
temp_df = temp_df[
[
"日期",
"综合PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,259 |
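The Caixin (财新) index records that follow all share one pattern: a single type parameter sent to the same cxIndexTrendInfo endpoint and a three-column DataFrame back. A sketch for this first one therefore covers the siblings as well (index_pmi_man_cx, index_dei_cx, index_nei_cx and so on are called the same way, with no arguments), assuming the package-level export:

import akshare as ak

pmi_com_df = ak.index_pmi_com_cx()
# expected columns per the record: 日期, 综合PMI, 变化值
print(pmi_com_df.head())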
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_pmi_man_cx
|
()
|
return temp_df
|
财新数据-指数报告-财新中国 PMI-制造业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-制造业 PMI
:rtype: pandas.DataFrame
|
财新数据-指数报告-财新中国 PMI-制造业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-制造业 PMI
:rtype: pandas.DataFrame
| 36 | 57 |
def index_pmi_man_cx() -> pd.DataFrame:
"""
财新数据-指数报告-财新中国 PMI-制造业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-制造业 PMI
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "man"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "制造业PMI", "日期"]
temp_df = temp_df[
[
"日期",
"制造业PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L36-L57
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_pmi_man_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "man"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "制造业PMI", "日期"]
temp_df = temp_df[
[
"日期",
"制造业PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,260 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_pmi_ser_cx
|
()
|
return temp_df
|
财新数据-指数报告-财新中国 PMI-服务业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-服务业 PMI
:rtype: pandas.DataFrame
|
财新数据-指数报告-财新中国 PMI-服务业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-服务业 PMI
:rtype: pandas.DataFrame
| 60 | 81 |
def index_pmi_ser_cx() -> pd.DataFrame:
"""
财新数据-指数报告-财新中国 PMI-服务业 PMI
https://s.ccxe.com.cn/indices/pmi
:return: 财新中国 PMI-服务业 PMI
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ser"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "服务业PMI", "日期"]
temp_df = temp_df[
[
"日期",
"服务业PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L60-L81
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_pmi_ser_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ser"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "服务业PMI", "日期"]
temp_df = temp_df[
[
"日期",
"服务业PMI",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,261 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_dei_cx
|
()
|
return temp_df
|
财新数据-指数报告-数字经济指数
https://s.ccxe.com.cn/indices/dei
:return: 数字经济指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-数字经济指数
https://s.ccxe.com.cn/indices/dei
:return: 数字经济指数
:rtype: pandas.DataFrame
| 84 | 105 |
def index_dei_cx() -> pd.DataFrame:
"""
财新数据-指数报告-数字经济指数
https://s.ccxe.com.cn/indices/dei
:return: 数字经济指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "dei"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "数字经济指数", "日期"]
temp_df = temp_df[
[
"日期",
"数字经济指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L84-L105
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_dei_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "dei"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "数字经济指数", "日期"]
temp_df = temp_df[
[
"日期",
"数字经济指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,262 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_ii_cx
|
()
|
return temp_df
|
财新数据-指数报告-产业指数
https://s.ccxe.com.cn/indices/dei
:return: 产业指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-产业指数
https://s.ccxe.com.cn/indices/dei
:return: 产业指数
:rtype: pandas.DataFrame
| 108 | 129 |
def index_ii_cx() -> pd.DataFrame:
"""
财新数据-指数报告-产业指数
https://s.ccxe.com.cn/indices/dei
:return: 产业指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ii"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "产业指数", "日期"]
temp_df = temp_df[
[
"日期",
"产业指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L108-L129
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_ii_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ii"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "产业指数", "日期"]
temp_df = temp_df[
[
"日期",
"产业指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,263 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_si_cx
|
()
|
return temp_df
|
财新数据-指数报告-溢出指数
https://s.ccxe.com.cn/indices/dei
:return: 溢出指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-溢出指数
https://s.ccxe.com.cn/indices/dei
:return: 溢出指数
:rtype: pandas.DataFrame
| 132 | 153 |
def index_si_cx() -> pd.DataFrame:
"""
财新数据-指数报告-溢出指数
https://s.ccxe.com.cn/indices/dei
:return: 溢出指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "si"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "溢出指数", "日期"]
temp_df = temp_df[
[
"日期",
"溢出指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L132-L153
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_si_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "si"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "溢出指数", "日期"]
temp_df = temp_df[
[
"日期",
"溢出指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,264 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_fi_cx
|
()
|
return temp_df
|
财新数据-指数报告-融合指数
https://s.ccxe.com.cn/indices/dei
:return: 融合指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-融合指数
https://s.ccxe.com.cn/indices/dei
:return: 融合指数
:rtype: pandas.DataFrame
| 156 | 177 |
def index_fi_cx() -> pd.DataFrame:
"""
财新数据-指数报告-融合指数
https://s.ccxe.com.cn/indices/dei
:return: 融合指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "fi"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "融合指数", "日期"]
temp_df = temp_df[
[
"日期",
"融合指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L156-L177
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_fi_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "fi"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "融合指数", "日期"]
temp_df = temp_df[
[
"日期",
"融合指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,265 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_bi_cx
|
()
|
return temp_df
|
财新数据-指数报告-基础指数
https://s.ccxe.com.cn/indices/dei
:return: 基础指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-基础指数
https://s.ccxe.com.cn/indices/dei
:return: 基础指数
:rtype: pandas.DataFrame
| 180 | 201 |
def index_bi_cx() -> pd.DataFrame:
"""
财新数据-指数报告-基础指数
https://s.ccxe.com.cn/indices/dei
:return: 基础指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "bi"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "基础指数", "日期"]
temp_df = temp_df[
[
"日期",
"基础指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L180-L201
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_bi_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "bi"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "基础指数", "日期"]
temp_df = temp_df[
[
"日期",
"基础指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,266 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_nei_cx
|
()
|
return temp_df
|
财新数据-指数报告-中国新经济指数
https://s.ccxe.com.cn/indices/nei
:return: 中国新经济指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-中国新经济指数
https://s.ccxe.com.cn/indices/nei
:return: 中国新经济指数
:rtype: pandas.DataFrame
| 204 | 225 |
def index_nei_cx() -> pd.DataFrame:
"""
财新数据-指数报告-中国新经济指数
https://s.ccxe.com.cn/indices/nei
:return: 中国新经济指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "nei"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "中国新经济指数", "日期"]
temp_df = temp_df[
[
"日期",
"中国新经济指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L204-L225
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_nei_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "nei"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "中国新经济指数", "日期"]
temp_df = temp_df[
[
"日期",
"中国新经济指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,267 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_li_cx
|
()
|
return temp_df
|
财新数据-指数报告-劳动力投入指数
https://s.ccxe.com.cn/indices/nei
:return: 劳动力投入指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-劳动力投入指数
https://s.ccxe.com.cn/indices/nei
:return: 劳动力投入指数
:rtype: pandas.DataFrame
| 228 | 249 |
def index_li_cx() -> pd.DataFrame:
"""
财新数据-指数报告-劳动力投入指数
https://s.ccxe.com.cn/indices/nei
:return: 劳动力投入指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "li"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "劳动力投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"劳动力投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L228-L249
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_li_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "li"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "劳动力投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"劳动力投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,268 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_ci_cx
|
()
|
return temp_df
|
财新数据-指数报告-资本投入指数
https://s.ccxe.com.cn/indices/nei
:return: 资本投入指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-资本投入指数
https://s.ccxe.com.cn/indices/nei
:return: 资本投入指数
:rtype: pandas.DataFrame
| 252 | 273 |
def index_ci_cx() -> pd.DataFrame:
"""
财新数据-指数报告-资本投入指数
https://s.ccxe.com.cn/indices/nei
:return: 资本投入指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ci"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "资本投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"资本投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L252-L273
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_ci_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ci"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "资本投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"资本投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,269 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_ti_cx
|
()
|
return temp_df
|
财新数据-指数报告-科技投入指数
https://s.ccxe.com.cn/indices/nei
:return: 科技投入指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-科技投入指数
https://s.ccxe.com.cn/indices/nei
:return: 科技投入指数
:rtype: pandas.DataFrame
| 276 | 297 |
def index_ti_cx() -> pd.DataFrame:
"""
财新数据-指数报告-科技投入指数
https://s.ccxe.com.cn/indices/nei
:return: 科技投入指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ti"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "科技投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"科技投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L276-L297
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_ti_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "ti"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "科技投入指数", "日期"]
temp_df = temp_df[
[
"日期",
"科技投入指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,270 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_neaw_cx
|
()
|
return temp_df
|
财新数据-指数报告-新经济行业入职平均工资水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济行业入职平均工资水平
:rtype: pandas.DataFrame
|
财新数据-指数报告-新经济行业入职平均工资水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济行业入职平均工资水平
:rtype: pandas.DataFrame
| 300 | 321 |
def index_neaw_cx() -> pd.DataFrame:
"""
财新数据-指数报告-新经济行业入职平均工资水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济行业入职平均工资水平
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "neaw"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "新经济行业入职平均工资水平", "日期"]
temp_df = temp_df[
[
"日期",
"新经济行业入职平均工资水平",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L300-L321
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_neaw_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "neaw"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "新经济行业入职平均工资水平", "日期"]
temp_df = temp_df[
[
"日期",
"新经济行业入职平均工资水平",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,271 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_awpr_cx
|
()
|
return temp_df
|
财新数据-指数报告-新经济入职工资溢价水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济入职工资溢价水平
:rtype: pandas.DataFrame
|
财新数据-指数报告-新经济入职工资溢价水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济入职工资溢价水平
:rtype: pandas.DataFrame
| 324 | 345 |
def index_awpr_cx() -> pd.DataFrame:
"""
财新数据-指数报告-新经济入职工资溢价水平
https://s.ccxe.com.cn/indices/nei
:return: 新经济入职工资溢价水平
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "awpr"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "新经济入职工资溢价水平", "日期"]
temp_df = temp_df[
[
"日期",
"新经济入职工资溢价水平",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L324-L345
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
12,
13,
20,
21
] | 40.909091 | false | 10.326087 | 22 | 1 | 59.090909 | 4 |
def index_awpr_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {"type": "awpr"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "新经济入职工资溢价水平", "日期"]
temp_df = temp_df[
[
"日期",
"新经济入职工资溢价水平",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,272 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cx.py
|
index_cci_cx
|
()
|
return temp_df
|
财新数据-指数报告-大宗商品指数
https://s.ccxe.com.cn/indices/cci
:return: 大宗商品指数
:rtype: pandas.DataFrame
|
财新数据-指数报告-大宗商品指数
https://s.ccxe.com.cn/indices/cci
:return: 大宗商品指数
:rtype: pandas.DataFrame
| 348 | 373 |
def index_cci_cx() -> pd.DataFrame:
"""
财新数据-指数报告-大宗商品指数
https://s.ccxe.com.cn/indices/cci
:return: 大宗商品指数
:rtype: pandas.DataFrame
"""
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {
"type": "cci",
"code": "1000050",
"month": "-1",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "大宗商品指数", "日期"]
temp_df = temp_df[
[
"日期",
"大宗商品指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cx.py#L348-L373
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 26.923077 |
[
7,
8,
13,
14,
15,
16,
17,
24,
25
] | 34.615385 | false | 10.326087 | 26 | 1 | 65.384615 | 4 |
def index_cci_cx() -> pd.DataFrame:
url = "https://s.ccxe.com.cn/api/index/pro/cxIndexTrendInfo"
params = {
"type": "cci",
"code": "1000050",
"month": "-1",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = ["变化值", "大宗商品指数", "日期"]
temp_df = temp_df[
[
"日期",
"大宗商品指数",
"变化值",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], unit="ms").dt.date
return temp_df
| 18,273 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cflp.py
|
index_cflp_price
|
(symbol: str = "周指数") -> pd
|
return temp_df
|
中国公路物流运价指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"周指数", "月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运价指数
:rtype: pandas.DataFrame
|
中国公路物流运价指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"周指数", "月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运价指数
:rtype: pandas.DataFrame
| 12 | 58 |
def index_cflp_price(symbol: str = "周指数") -> pd.DataFrame:
"""
中国公路物流运价指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"周指数", "月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运价指数
:rtype: pandas.DataFrame
"""
symbol_map = {
"周指数": "2",
"月指数": "3",
"季度指数": "4",
"年度指数": "5",
}
url = "http://index.0256.cn/expcenter_trend.action"
params = {
"marketId": "1",
"attribute1": "5",
"exponentTypeId": symbol_map[symbol],
"cateId": "2",
"attribute2": "华北",
"city": "",
"startLine": "",
"endLine": "",
}
headers = {
"Origin": "http://index.0256.cn",
"Referer": "http://index.0256.cn/expx.htm",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36",
}
r = requests.post(url, data=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["chart1"]["xLebal"],
data_json["chart1"]["yLebal"],
data_json["chart2"]["yLebal"],
data_json["chart3"]["yLebal"],
]
).T
temp_df.columns = ["日期", "定基指数", "环比指数", "同比指数"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["定基指数"] = pd.to_numeric(temp_df["定基指数"])
temp_df["环比指数"] = pd.to_numeric(temp_df["环比指数"])
temp_df["同比指数"] = pd.to_numeric(temp_df["同比指数"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cflp.py#L12-L58
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 19.148936 |
[
9,
15,
16,
26,
31,
32,
33,
41,
42,
43,
44,
45,
46
] | 27.659574 | false | 13.043478 | 47 | 1 | 72.340426 | 6 |
def index_cflp_price(symbol: str = "周指数") -> pd.DataFrame:
symbol_map = {
"周指数": "2",
"月指数": "3",
"季度指数": "4",
"年度指数": "5",
}
url = "http://index.0256.cn/expcenter_trend.action"
params = {
"marketId": "1",
"attribute1": "5",
"exponentTypeId": symbol_map[symbol],
"cateId": "2",
"attribute2": "华北",
"city": "",
"startLine": "",
"endLine": "",
}
headers = {
"Origin": "http://index.0256.cn",
"Referer": "http://index.0256.cn/expx.htm",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36",
}
r = requests.post(url, data=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["chart1"]["xLebal"],
data_json["chart1"]["yLebal"],
data_json["chart2"]["yLebal"],
data_json["chart3"]["yLebal"],
]
).T
temp_df.columns = ["日期", "定基指数", "环比指数", "同比指数"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["定基指数"] = pd.to_numeric(temp_df["定基指数"])
temp_df["环比指数"] = pd.to_numeric(temp_df["环比指数"])
temp_df["同比指数"] = pd.to_numeric(temp_df["同比指数"])
return temp_df
| 18,278 |
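A usage sketch for index_cflp_price, assuming the package-level export; the companion index_cflp_volume in the next record is called the same way, except that it does not offer the 周指数 option:

import akshare as ak

cflp_price_df = ak.index_cflp_price(symbol="月指数")
# expected columns per the record: 日期, 定基指数, 环比指数, 同比指数
print(cflp_price_df.head())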
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cflp.py
|
index_cflp_volume
|
(symbol: str = "月指数") -> pd
|
return temp_df
|
中国公路物流运量指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运量指数
:rtype: pandas.DataFrame
|
中国公路物流运量指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运量指数
:rtype: pandas.DataFrame
| 61 | 106 |
def index_cflp_volume(symbol: str = "月指数") -> pd.DataFrame:
"""
中国公路物流运量指数
http://index.0256.cn/expx.htm
:param symbol: choice of {"月指数", "季度指数", "年度指数"}
:type symbol: str
:return: 中国公路物流运量指数
:rtype: pandas.DataFrame
"""
symbol_map = {
"月指数": "3",
"季度指数": "4",
"年度指数": "5",
}
url = "http://index.0256.cn/volume_query.action"
params = {
"type": "1",
"marketId": "1",
"expTypeId": symbol_map[symbol],
"startDate1": "",
"endDate1": "",
"city": "",
"startDate3": "",
"endDate3": "",
}
headers = {
"Origin": "http://index.0256.cn",
"Referer": "http://index.0256.cn/expx.htm",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36",
}
r = requests.post(url, data=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["chart1"]["xLebal"],
data_json["chart1"]["yLebal"],
data_json["chart2"]["yLebal"],
data_json["chart3"]["yLebal"],
]
).T
temp_df.columns = ["日期", "定基指数", "环比指数", "同比指数"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["定基指数"] = pd.to_numeric(temp_df["定基指数"])
temp_df["环比指数"] = pd.to_numeric(temp_df["环比指数"])
temp_df["同比指数"] = pd.to_numeric(temp_df["同比指数"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cflp.py#L61-L106
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 19.565217 |
[
9,
14,
15,
25,
30,
31,
32,
40,
41,
42,
43,
44,
45
] | 28.26087 | false | 13.043478 | 46 | 1 | 71.73913 | 6 |
def index_cflp_volume(symbol: str = "月指数") -> pd.DataFrame:
symbol_map = {
"月指数": "3",
"季度指数": "4",
"年度指数": "5",
}
url = "http://index.0256.cn/volume_query.action"
params = {
"type": "1",
"marketId": "1",
"expTypeId": symbol_map[symbol],
"startDate1": "",
"endDate1": "",
"city": "",
"startDate3": "",
"endDate3": "",
}
headers = {
"Origin": "http://index.0256.cn",
"Referer": "http://index.0256.cn/expx.htm",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36",
}
r = requests.post(url, data=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["chart1"]["xLebal"],
data_json["chart1"]["yLebal"],
data_json["chart2"]["yLebal"],
data_json["chart3"]["yLebal"],
]
).T
temp_df.columns = ["日期", "定基指数", "环比指数", "同比指数"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["定基指数"] = pd.to_numeric(temp_df["定基指数"])
temp_df["环比指数"] = pd.to_numeric(temp_df["环比指数"])
temp_df["同比指数"] = pd.to_numeric(temp_df["同比指数"])
return temp_df
| 18,279 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_weibo.py
|
_get_items
|
(word="股票"):
|
return {word: re.findall(r"\d+", res.json()["html"])[0]}
| 16 | 20 |
def _get_items(word="股票"):
url = "https://data.weibo.com/index/ajax/newindex/searchword"
payload = {"word": word}
res = requests.post(url, data=payload, headers=index_weibo_headers)
return {word: re.findall(r"\d+", res.json()["html"])[0]}
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_weibo.py#L16-L20
| 25 |
[
0
] | 20 |
[
1,
2,
3,
4
] | 80 | false | 22.916667 | 5 | 1 | 20 | 0 |
def _get_items(word="股票"):
url = "https://data.weibo.com/index/ajax/newindex/searchword"
payload = {"word": word}
res = requests.post(url, data=payload, headers=index_weibo_headers)
return {word: re.findall(r"\d+", res.json()["html"])[0]}
| 18,280 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_weibo.py
|
_get_index_data
|
(wid, time_type)
|
return df
| 23 | 36 |
def _get_index_data(wid, time_type):
url = "http://data.weibo.com/index/ajax/newindex/getchartdata"
data = {
"wid": wid,
"dateGroup": time_type,
}
res = requests.get(url, params=data, headers=index_weibo_headers)
json_df = res.json()
data = {
"index": json_df["data"][0]["trend"]["x"],
"value": json_df["data"][0]["trend"]["s"],
}
df = pd.DataFrame(data)
return df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_weibo.py#L23-L36
| 25 |
[
0
] | 7.142857 |
[
1,
2,
6,
7,
8,
12,
13
] | 50 | false | 22.916667 | 14 | 1 | 50 | 0 |
def _get_index_data(wid, time_type):
url = "http://data.weibo.com/index/ajax/newindex/getchartdata"
data = {
"wid": wid,
"dateGroup": time_type,
}
res = requests.get(url, params=data, headers=index_weibo_headers)
json_df = res.json()
data = {
"index": json_df["data"][0]["trend"]["x"],
"value": json_df["data"][0]["trend"]["s"],
}
df = pd.DataFrame(data)
return df
| 18,281 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_weibo.py
|
_process_index
|
(index)
|
return index
| 39 | 49 |
def _process_index(index):
now = datetime.datetime.now()
curr_year = now.year
curr_date = "%04d%02d%02d" % (now.year, now.month, now.day)
if "月" in index:
tmp = index.replace("日", "").split("月")
date = "%04d%02d%02d" % (curr_year, int(tmp[0]), int(tmp[1]))
if date > curr_date:
date = "%04d%02d%02d" % (curr_year - 1, int(tmp[0]), int(tmp[1]))
return date
return index
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_weibo.py#L39-L49
| 25 |
[
0
] | 9.090909 |
[
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 90.909091 | false | 22.916667 | 11 | 3 | 9.090909 | 0 |
def _process_index(index):
now = datetime.datetime.now()
curr_year = now.year
curr_date = "%04d%02d%02d" % (now.year, now.month, now.day)
if "月" in index:
tmp = index.replace("日", "").split("月")
date = "%04d%02d%02d" % (curr_year, int(tmp[0]), int(tmp[1]))
if date > curr_date:
date = "%04d%02d%02d" % (curr_year - 1, int(tmp[0]), int(tmp[1]))
return date
return index
| 18,282 |
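A quick illustration of what _process_index does with the two input shapes it handles, assuming the module path from the record is importable; the concrete output depends on the day the code runs, so the values in the comments are assumptions for a run in mid-2024:

from akshare.index.index_weibo import _process_index

print(_process_index("20240301"))  # strings without 月 pass through unchanged: '20240301'
print(_process_index("3月1日"))    # becomes '20240301', or the previous year if that date lies in the future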
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_weibo.py
|
weibo_index
|
(word="python", time_type="3month")
|
:param word: str
:param time_type: str 1hour, 1day, 1month, 3month
:return:
|
:param word: str
:param time_type: str 1hour, 1day, 1month, 3month
:return:
| 52 | 73 |
def weibo_index(word="python", time_type="3month"):
"""
:param word: str
:param time_type: str 1hour, 1day, 1month, 3month
:return:
"""
dict_keyword = _get_items(word)
df_list = []
for keyword, wid in dict_keyword.items():
df = _get_index_data(wid, time_type)
if df is not None:
df.columns = ["index", keyword]
df["index"] = df["index"].apply(lambda x: _process_index(x))
df.set_index("index", inplace=True)
df_list.append(df)
if len(df_list) > 0:
df = pd.concat(df_list, axis=1)
        if time_type in ("1hour", "1day"):
df.index = pd.to_datetime(df.index)
else:
df.index = pd.to_datetime(df.index, format="%Y%m%d")
return df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_weibo.py#L52-L73
| 25 |
[
0,
1,
2,
3,
4,
5
] | 27.272727 |
[
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
21
] | 63.636364 | false | 22.916667 | 22 | 6 | 36.363636 | 3 |
def weibo_index(word="python", time_type="3month"):
dict_keyword = _get_items(word)
df_list = []
for keyword, wid in dict_keyword.items():
df = _get_index_data(wid, time_type)
if df is not None:
df.columns = ["index", keyword]
df["index"] = df["index"].apply(lambda x: _process_index(x))
df.set_index("index", inplace=True)
df_list.append(df)
if len(df_list) > 0:
df = pd.concat(df_list, axis=1)
        if time_type in ("1hour", "1day"):
df.index = pd.to_datetime(df.index)
else:
df.index = pd.to_datetime(df.index, format="%Y%m%d")
return df
| 18,283 |
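A usage sketch for weibo_index, assuming the package-level export; time_type takes 1hour, 1day, 1month or 3month as the docstring in the record notes:

import akshare as ak

weibo_df = ak.weibo_index(word="python", time_type="3month")
# a datetime-indexed DataFrame with one column per searched keyword
print(weibo_df.head())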
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sugar.py
|
index_sugar_msweet
|
()
|
return temp_df
|
沐甜科技数据中心-中国食糖指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 中国食糖指数
:rtype: pandas.DataFrame
|
沐甜科技数据中心-中国食糖指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 中国食糖指数
:rtype: pandas.DataFrame
| 12 | 35 |
def index_sugar_msweet() -> pd.DataFrame:
"""
沐甜科技数据中心-中国食糖指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 中国食糖指数
:rtype: pandas.DataFrame
"""
url = "http://www.msweet.com.cn/eportal/ui"
params = {
"struts.portlet.action": "/portlet/price!getSTZSJson.action",
"moduleId": "cb752447cfe24b44b18c7a7e9abab048",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = ["日期", "综合价格", "原糖价格", "现货价格"]
temp_df.loc[3226, ["原糖价格"]] = 12.88 # 数据源错误
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["综合价格"] = pd.to_numeric(temp_df["综合价格"])
temp_df["原糖价格"] = pd.to_numeric(temp_df["原糖价格"])
temp_df["现货价格"] = pd.to_numeric(temp_df["现货价格"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sugar.py#L12-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 29.166667 |
[
7,
8,
12,
13,
14,
17,
18,
19,
20,
21,
22,
23
] | 50 | false | 12.5 | 24 | 1 | 50 | 4 |
def index_sugar_msweet() -> pd.DataFrame:
url = "http://www.msweet.com.cn/eportal/ui"
params = {
"struts.portlet.action": "/portlet/price!getSTZSJson.action",
"moduleId": "cb752447cfe24b44b18c7a7e9abab048",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = ["日期", "综合价格", "原糖价格", "现货价格"]
temp_df.loc[3226, ["原糖价格"]] = 12.88 # 数据源错误
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["综合价格"] = pd.to_numeric(temp_df["综合价格"])
temp_df["原糖价格"] = pd.to_numeric(temp_df["原糖价格"])
temp_df["现货价格"] = pd.to_numeric(temp_df["现货价格"])
return temp_df
| 18,284 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sugar.py
|
index_inner_quote_sugar_msweet
|
()
|
return temp_df
|
沐甜科技数据中心-配额内进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 配额内进口糖估算指数
:rtype: pandas.DataFrame
|
沐甜科技数据中心-配额内进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 配额内进口糖估算指数
:rtype: pandas.DataFrame
| 38 | 79 |
def index_inner_quote_sugar_msweet() -> pd.DataFrame:
"""
沐甜科技数据中心-配额内进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 配额内进口糖估算指数
:rtype: pandas.DataFrame
"""
url = "http://www.msweet.com.cn/datacenterapply/datacenter/json/JinKongTang.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = [
"日期",
"利润空间",
"泰国糖",
"泰国MA5",
"巴西MA5",
"利润MA5",
"巴西MA10",
"巴西糖",
"柳州现货价",
"广州现货价",
"泰国MA10",
"利润MA30",
"利润MA10",
]
temp_df.loc[988, ["泰国糖"]] = 4045.2 # 数据源错误
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["利润空间"] = pd.to_numeric(temp_df["利润空间"])
temp_df["泰国糖"] = pd.to_numeric(temp_df["泰国糖"])
temp_df["泰国MA5"] = pd.to_numeric(temp_df["泰国MA5"])
temp_df["巴西MA5"] = pd.to_numeric(temp_df["巴西MA5"])
temp_df["巴西MA10"] = pd.to_numeric(temp_df["巴西MA10"])
temp_df["巴西糖"] = pd.to_numeric(temp_df["巴西糖"])
temp_df["柳州现货价"] = pd.to_numeric(temp_df["柳州现货价"])
temp_df["广州现货价"] = pd.to_numeric(temp_df["广州现货价"])
temp_df["泰国MA10"] = pd.to_numeric(temp_df["泰国MA10"])
temp_df["利润MA30"] = pd.to_numeric(temp_df["利润MA30"])
temp_df["利润MA10"] = pd.to_numeric(temp_df["利润MA10"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sugar.py#L38-L79
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 16.666667 |
[
7,
8,
9,
10,
13,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41
] | 45.238095 | false | 12.5 | 42 | 1 | 54.761905 | 4 |
def index_inner_quote_sugar_msweet() -> pd.DataFrame:
url = "http://www.msweet.com.cn/datacenterapply/datacenter/json/JinKongTang.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = [
"日期",
"利润空间",
"泰国糖",
"泰国MA5",
"巴西MA5",
"利润MA5",
"巴西MA10",
"巴西糖",
"柳州现货价",
"广州现货价",
"泰国MA10",
"利润MA30",
"利润MA10",
]
temp_df.loc[988, ["泰国糖"]] = 4045.2 # 数据源错误
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["利润空间"] = pd.to_numeric(temp_df["利润空间"])
temp_df["泰国糖"] = pd.to_numeric(temp_df["泰国糖"])
temp_df["泰国MA5"] = pd.to_numeric(temp_df["泰国MA5"])
temp_df["巴西MA5"] = pd.to_numeric(temp_df["巴西MA5"])
temp_df["巴西MA10"] = pd.to_numeric(temp_df["巴西MA10"])
temp_df["巴西糖"] = pd.to_numeric(temp_df["巴西糖"])
temp_df["柳州现货价"] = pd.to_numeric(temp_df["柳州现货价"])
temp_df["广州现货价"] = pd.to_numeric(temp_df["广州现货价"])
temp_df["泰国MA10"] = pd.to_numeric(temp_df["泰国MA10"])
temp_df["利润MA30"] = pd.to_numeric(temp_df["利润MA30"])
temp_df["利润MA10"] = pd.to_numeric(temp_df["利润MA10"])
return temp_df
| 18,285 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sugar.py
|
index_outer_quote_sugar_msweet
|
()
|
return temp_df
|
沐甜科技数据中心-配额外进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 配额外进口糖估算指数
:rtype: pandas.DataFrame
|
沐甜科技数据中心-配额外进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
:return: 配额外进口糖估算指数
:rtype: pandas.DataFrame
| 82 | 102 |
def index_outer_quote_sugar_msweet() -> pd.DataFrame:
"""
沐甜科技数据中心-配额外进口糖估算指数
http://www.msweet.com.cn/mtkj/sjzx13/index.html
    :return: 配额外进口糖估算指数
:rtype: pandas.DataFrame
"""
url = "http://www.msweet.com.cn/datacenterapply/datacenter/json/Jkpewlr.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = ["日期", "巴西糖进口成本", "泰国糖进口利润空间", "巴西糖进口利润空间", "泰国糖进口成本", "日照现货价"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["巴西糖进口成本"] = pd.to_numeric(temp_df["巴西糖进口成本"])
temp_df["泰国糖进口利润空间"] = pd.to_numeric(temp_df["泰国糖进口利润空间"], errors="coerce")
temp_df["巴西糖进口利润空间"] = pd.to_numeric(temp_df["巴西糖进口利润空间"], errors="coerce")
temp_df["泰国糖进口成本"] = pd.to_numeric(temp_df["泰国糖进口成本"])
temp_df["日照现货价"] = pd.to_numeric(temp_df["日照现货价"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sugar.py#L82-L102
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 33.333333 |
[
7,
8,
9,
10,
13,
14,
15,
16,
17,
18,
19,
20
] | 57.142857 | false | 12.5 | 21 | 1 | 42.857143 | 4 |
def index_outer_quote_sugar_msweet() -> pd.DataFrame:
url = "http://www.msweet.com.cn/datacenterapply/datacenter/json/Jkpewlr.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.concat(
[pd.DataFrame(data_json["category"]), pd.DataFrame(data_json["data"])], axis=1
)
temp_df.columns = ["日期", "巴西糖进口成本", "泰国糖进口利润空间", "巴西糖进口利润空间", "泰国糖进口成本", "日照现货价"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["巴西糖进口成本"] = pd.to_numeric(temp_df["巴西糖进口成本"])
temp_df["泰国糖进口利润空间"] = pd.to_numeric(temp_df["泰国糖进口利润空间"], errors="coerce")
temp_df["巴西糖进口利润空间"] = pd.to_numeric(temp_df["巴西糖进口利润空间"], errors="coerce")
temp_df["泰国糖进口成本"] = pd.to_numeric(temp_df["泰国糖进口成本"])
temp_df["日照现货价"] = pd.to_numeric(temp_df["日照现货价"], errors="coerce")
return temp_df
| 18,286 |
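The three index_*_sugar_msweet records above share one pattern: fetch a JSON payload, concat the category and data arrays, then coerce column types. A minimal usage sketch, assuming the package is installed and the msweet endpoints still serve these JSON files:

from akshare.index.index_sugar import (
    index_sugar_msweet,
    index_inner_quote_sugar_msweet,
    index_outer_quote_sugar_msweet,
)

sugar_df = index_sugar_msweet()              # 中国食糖指数
inner_df = index_inner_quote_sugar_msweet()  # 配额内进口糖估算指数
outer_df = index_outer_quote_sugar_msweet()  # 配额外进口糖估算指数
print(sugar_df.head(), inner_df.shape, outer_df.shape)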
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_cons_sina
|
(symbol: str = "000300")
|
return pd.DataFrame(demjson.decode(r.text))
|
新浪新版股票指数成份页面, 目前该接口可获取指数数量较少
http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000040
:param symbol: 指数代码
:type symbol: str
:return: 指数的成份股
:rtype: pandas.DataFrame
|
新浪新版股票指数成份页面, 目前该接口可获取指数数量较少
http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000040
:param symbol: 指数代码
:type symbol: str
:return: 指数的成份股
:rtype: pandas.DataFrame
| 20 | 61 |
def index_stock_cons_sina(symbol: str = "000300") -> pd.DataFrame:
"""
新浪新版股票指数成份页面, 目前该接口可获取指数数量较少
http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000040
:param symbol: 指数代码
:type symbol: str
:return: 指数的成份股
:rtype: pandas.DataFrame
"""
if symbol == "000300":
symbol = "hs300"
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCountSimple"
params = {"node": f"{symbol}"}
r = requests.get(url, params=params)
page_num = math.ceil(int(r.json()) / 80) + 1
temp_df = pd.DataFrame()
for page in range(1, page_num):
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "hs300",
"symbol": "",
"_s_r_a": "init",
}
r = requests.get(url, params=params)
temp_df = pd.concat([temp_df, pd.DataFrame(demjson.decode(r.text))], ignore_index=True)
return temp_df
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeDataSimple"
params = {
"page": 1,
"num": "3000",
"sort": "symbol",
"asc": "1",
"node": f"zhishu_{symbol}",
"_s_r_a": "setlen",
}
r = requests.get(url, params=params)
return pd.DataFrame(demjson.decode(r.text))
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L20-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 21.428571 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
27,
28,
29,
31,
32,
40,
41
] | 40.47619 | false | 15.09434 | 42 | 3 | 59.52381 | 6 |
def index_stock_cons_sina(symbol: str = "000300") -> pd.DataFrame:
if symbol == "000300":
symbol = "hs300"
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCountSimple"
params = {"node": f"{symbol}"}
r = requests.get(url, params=params)
page_num = math.ceil(int(r.json()) / 80) + 1
temp_df = pd.DataFrame()
for page in range(1, page_num):
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "hs300",
"symbol": "",
"_s_r_a": "init",
}
r = requests.get(url, params=params)
temp_df = pd.concat([temp_df, pd.DataFrame(demjson.decode(r.text))], ignore_index=True)
return temp_df
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeDataSimple"
params = {
"page": 1,
"num": "3000",
"sort": "symbol",
"asc": "1",
"node": f"zhishu_{symbol}",
"_s_r_a": "setlen",
}
r = requests.get(url, params=params)
return pd.DataFrame(demjson.decode(r.text))
| 18,287 |
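A minimal usage sketch for index_stock_cons_sina above, assuming the package is installed and the Sina quotes service is reachable; note that the hs300 branch pages through results 80 rows at a time while other indexes return in one request:

from akshare.index.index_cons import index_stock_cons_sina

cons_df = index_stock_cons_sina(symbol="000300")
print(cons_df.shape)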
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_info
|
()
|
return index_df[["index_code", "display_name", "publish_date"]]
|
聚宽-指数数据-指数列表
https://www.joinquant.com/data/dict/indexData
:return: 指数信息的数据框
:rtype: pandas.DataFrame
|
聚宽-指数数据-指数列表
https://www.joinquant.com/data/dict/indexData
:return: 指数信息的数据框
:rtype: pandas.DataFrame
| 64 | 74 |
def index_stock_info() -> pd.DataFrame:
"""
聚宽-指数数据-指数列表
https://www.joinquant.com/data/dict/indexData
:return: 指数信息的数据框
:rtype: pandas.DataFrame
"""
index_df = pd.read_html("https://www.joinquant.com/data/dict/indexData")[0]
index_df["指数代码"] = index_df["指数代码"].str.split(".", expand=True)[0]
index_df.columns = ["index_code", "display_name", "publish_date", "-", "-"]
return index_df[["index_code", "display_name", "publish_date"]]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L64-L74
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 63.636364 |
[
7,
8,
9,
10
] | 36.363636 | false | 15.09434 | 11 | 1 | 63.636364 | 4 |
def index_stock_info() -> pd.DataFrame:
index_df = pd.read_html("https://www.joinquant.com/data/dict/indexData")[0]
index_df["指数代码"] = index_df["指数代码"].str.split(".", expand=True)[0]
index_df.columns = ["index_code", "display_name", "publish_date", "-", "-"]
return index_df[["index_code", "display_name", "publish_date"]]
| 18,288 |
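A minimal usage sketch for index_stock_info above, assuming the package is installed and the JoinQuant page still exposes the index table:

from akshare.index.index_cons import index_stock_info

info_df = index_stock_info()  # columns: index_code, display_name, publish_date
print(info_df.head())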
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_cons
|
(symbol: str = "399639")
|
return temp_df
|
最新股票指数的成份股目录
http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新股票指数的成份股目录
:rtype: pandas.DataFrame
|
最新股票指数的成份股目录
http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新股票指数的成份股目录
:rtype: pandas.DataFrame
| 77 | 110 |
def index_stock_cons(symbol: str = "399639") -> pd.DataFrame:
"""
最新股票指数的成份股目录
http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新股票指数的成份股目录
:rtype: pandas.DataFrame
"""
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{symbol}.phtml"
r = requests.get(url)
r.encoding = "gb2312"
soup = BeautifulSoup(r.text, "lxml")
page_num = (
soup.find(attrs={"class": "table2"})
.find("td")
.find_all("a")[-1]["href"]
.split("page=")[-1]
.split("&")[0]
)
if page_num == "#":
temp_df = pd.read_html(r.text, header=1)[3].iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
temp_df = pd.DataFrame()
for page in range(1, int(page_num) + 1):
url = f"http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page={page}&indexid={symbol}"
r = requests.get(url)
r.encoding = "gb2312"
temp_df = pd.concat([temp_df, pd.read_html(r.text, header=1)[3]], ignore_index=True)
temp_df = temp_df.iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L77-L110
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 26.470588 |
[
9,
10,
11,
12,
13,
20,
21,
22,
23,
25,
26,
27,
28,
29,
30,
31,
32,
33
] | 52.941176 | false | 15.09434 | 34 | 3 | 47.058824 | 6 |
def index_stock_cons(symbol: str = "399639") -> pd.DataFrame:
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{symbol}.phtml"
r = requests.get(url)
r.encoding = "gb2312"
soup = BeautifulSoup(r.text, "lxml")
page_num = (
soup.find(attrs={"class": "table2"})
.find("td")
.find_all("a")[-1]["href"]
.split("page=")[-1]
.split("&")[0]
)
if page_num == "#":
temp_df = pd.read_html(r.text, header=1)[3].iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
temp_df = pd.DataFrame()
for page in range(1, int(page_num) + 1):
url = f"http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page={page}&indexid={symbol}"
r = requests.get(url)
r.encoding = "gb2312"
temp_df = pd.concat([temp_df, pd.read_html(r.text, header=1)[3]], ignore_index=True)
temp_df = temp_df.iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
| 18,289 |
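A minimal usage sketch for index_stock_cons above, assuming the package is installed and the Sina corp pages are reachable; any index code returned by index_stock_info() should be accepted:

from akshare.index.index_cons import index_stock_cons

cons_df = index_stock_cons(symbol="399639")
print(cons_df.head())  # 品种代码 is zero-padded to six digits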
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_cons_csindex
|
(symbol: str = "000300")
|
return temp_df
|
中证指数网站-成份股目录
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新指数的成份股
:rtype: pandas.DataFrame
|
中证指数网站-成份股目录
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新指数的成份股
:rtype: pandas.DataFrame
| 113 | 139 |
def index_stock_cons_csindex(symbol: str = "000300") -> pd.DataFrame:
"""
中证指数网站-成份股目录
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 函数获取
:type symbol: str
:return: 最新指数的成份股
:rtype: pandas.DataFrame
"""
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/cons/{symbol}cons.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df.columns = [
"日期",
"指数代码",
"指数名称",
"指数英文名称",
"成分券代码",
"成分券名称",
"成分券英文名称",
"交易所",
"交易所英文名称",
]
temp_df['日期'] = pd.to_datetime(temp_df['日期'], format="%Y%m%d").dt.date
temp_df["指数代码"] = temp_df["指数代码"].astype(str).str.zfill(6)
temp_df["成分券代码"] = temp_df["成分券代码"].astype(str).str.zfill(6)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L113-L139
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 33.333333 |
[
9,
10,
11,
12,
23,
24,
25,
26
] | 29.62963 | false | 15.09434 | 27 | 1 | 70.37037 | 6 |
def index_stock_cons_csindex(symbol: str = "000300") -> pd.DataFrame:
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/cons/{symbol}cons.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df.columns = [
"日期",
"指数代码",
"指数名称",
"指数英文名称",
"成分券代码",
"成分券名称",
"成分券英文名称",
"交易所",
"交易所英文名称",
]
temp_df['日期'] = pd.to_datetime(temp_df['日期'], format="%Y%m%d").dt.date
temp_df["指数代码"] = temp_df["指数代码"].astype(str).str.zfill(6)
temp_df["成分券代码"] = temp_df["成分券代码"].astype(str).str.zfill(6)
return temp_df
| 18,290 |
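A minimal usage sketch for index_stock_cons_csindex above, assuming the package is installed and the csindex file host still serves the cons .xls file for the given code:

from akshare.index.index_cons import index_stock_cons_csindex

csindex_df = index_stock_cons_csindex(symbol="000300")
print(csindex_df[["日期", "成分券代码", "成分券名称"]].head())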
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_cons_weight_csindex
|
(symbol: str = "000300")
|
return temp_df
|
中证指数网站-样本权重
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 接口获取
:type symbol: str
:return: 最新指数的成份股权重
:rtype: pandas.DataFrame
|
中证指数网站-样本权重
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 接口获取
:type symbol: str
:return: 最新指数的成份股权重
:rtype: pandas.DataFrame
| 142 | 170 |
def index_stock_cons_weight_csindex(symbol: str = "000300") -> pd.DataFrame:
"""
中证指数网站-样本权重
http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
:param symbol: 指数代码, 可以通过 ak.index_stock_info() 接口获取
:type symbol: str
:return: 最新指数的成份股权重
:rtype: pandas.DataFrame
"""
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/closeweight/{symbol}closeweight.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df.columns = [
"日期",
"指数代码",
"指数名称",
"指数英文名称",
"成分券代码",
"成分券名称",
"成分券英文名称",
"交易所",
"交易所英文名称",
"权重",
]
temp_df['日期'] = pd.to_datetime(temp_df['日期'], format="%Y%m%d").dt.date
temp_df["指数代码"] = temp_df["指数代码"].astype(str).str.zfill(6)
temp_df["成分券代码"] = temp_df["成分券代码"].astype(str).str.zfill(6)
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L142-L170
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 31.034483 |
[
9,
10,
11,
12,
24,
25,
26,
27,
28
] | 31.034483 | false | 15.09434 | 29 | 1 | 68.965517 | 6 |
def index_stock_cons_weight_csindex(symbol: str = "000300") -> pd.DataFrame:
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/closeweight/{symbol}closeweight.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df.columns = [
"日期",
"指数代码",
"指数名称",
"指数英文名称",
"成分券代码",
"成分券名称",
"成分券英文名称",
"交易所",
"交易所英文名称",
"权重",
]
temp_df['日期'] = pd.to_datetime(temp_df['日期'], format="%Y%m%d").dt.date
temp_df["指数代码"] = temp_df["指数代码"].astype(str).str.zfill(6)
temp_df["成分券代码"] = temp_df["成分券代码"].astype(str).str.zfill(6)
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
| 18,291 |
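A minimal usage sketch for index_stock_cons_weight_csindex above, under the same assumptions as the previous record (csindex closeweight file still available):

from akshare.index.index_cons import index_stock_cons_weight_csindex

weight_df = index_stock_cons_weight_csindex(symbol="000300")
print(weight_df.sort_values("权重", ascending=False).head(10))  # top-weighted constituents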
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
index_stock_hist
|
(symbol: str = "sh000300")
|
return temp_df
|
指数历史成份, 从 2005 年开始
http://stock.jrj.com.cn/share,sh000300,2015nlscf_2.shtml
:param symbol: 指数代码, 需要带市场前缀
:type symbol: str
:return: 历史成份的进入和退出数据
:rtype: pandas.DataFrame
|
指数历史成份, 从 2005 年开始
http://stock.jrj.com.cn/share,sh000300,2015nlscf_2.shtml
:param symbol: 指数代码, 需要带市场前缀
:type symbol: str
:return: 历史成份的进入和退出数据
:rtype: pandas.DataFrame
| 173 | 202 |
def index_stock_hist(symbol: str = "sh000300") -> pd.DataFrame:
"""
指数历史成份, 从 2005 年开始
http://stock.jrj.com.cn/share,sh000300,2015nlscf_2.shtml
:param symbol: 指数代码, 需要带市场前缀
:type symbol: str
:return: 历史成份的进入和退出数据
:rtype: pandas.DataFrame
"""
url = f"http://stock.jrj.com.cn/share,{symbol},2015nlscf.shtml"
r = requests.get(url)
r.encoding = "gb2312"
soup = BeautifulSoup(r.text, "lxml")
last_page_num = soup.find_all("a", attrs={"target": "_self"})[-2].text
temp_df = pd.read_html(r.text)[-1]
if last_page_num == "历史成份":
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["股票名称"]
temp_df.columns = ["stock_code", "in_date", "out_date"]
return temp_df
for page in tqdm(range(2, int(last_page_num) + 1), leave=False):
url = f"http://stock.jrj.com.cn/share,{symbol},2015nlscf_{page}.shtml"
r = requests.get(url)
r.encoding = "gb2312"
inner_temp_df = pd.read_html(r.text)[-1]
temp_df = pd.concat([temp_df, inner_temp_df], ignore_index=True)
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["股票名称"]
temp_df.columns = ["stock_code", "in_date", "out_date"]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L173-L202
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 30 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29
] | 70 | false | 15.09434 | 30 | 3 | 30 | 6 |
def index_stock_hist(symbol: str = "sh000300") -> pd.DataFrame:
url = f"http://stock.jrj.com.cn/share,{symbol},2015nlscf.shtml"
r = requests.get(url)
r.encoding = "gb2312"
soup = BeautifulSoup(r.text, "lxml")
last_page_num = soup.find_all("a", attrs={"target": "_self"})[-2].text
temp_df = pd.read_html(r.text)[-1]
if last_page_num == "历史成份":
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["股票名称"]
temp_df.columns = ["stock_code", "in_date", "out_date"]
return temp_df
for page in tqdm(range(2, int(last_page_num) + 1), leave=False):
url = f"http://stock.jrj.com.cn/share,{symbol},2015nlscf_{page}.shtml"
r = requests.get(url)
r.encoding = "gb2312"
inner_temp_df = pd.read_html(r.text)[-1]
temp_df = pd.concat([temp_df, inner_temp_df], ignore_index=True)
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["股票名称"]
temp_df.columns = ["stock_code", "in_date", "out_date"]
return temp_df
| 18,292 |
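A minimal usage sketch for index_stock_hist above, assuming the package is installed and the jrj.com.cn history pages are still online:

from akshare.index.index_cons import index_stock_hist

hist_df = index_stock_hist(symbol="sh000300")
print(hist_df.head())  # columns: stock_code, in_date, out_date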
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cons.py
|
stock_a_code_to_symbol
|
(symbol: str = "000300")
|
输入股票代码判断股票市场
:param symbol: 股票代码
:type symbol: str
:return: 股票市场
:rtype: str
|
输入股票代码判断股票市场
:param symbol: 股票代码
:type symbol: str
:return: 股票市场
:rtype: str
| 205 | 216 |
def stock_a_code_to_symbol(symbol: str = "000300") -> str:
"""
输入股票代码判断股票市场
:param symbol: 股票代码
:type symbol: str
:return: 股票市场
:rtype: str
"""
if symbol.startswith("6") or symbol.startswith("900"):
return f"sh{symbol}"
else:
return f"sz{symbol}"
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cons.py#L205-L216
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 66.666667 |
[
8,
9,
11
] | 25 | false | 15.09434 | 12 | 3 | 75 | 5 |
def stock_a_code_to_symbol(symbol: str = "000300") -> str:
if symbol.startswith("6") or symbol.startswith("900"):
return f"sh{symbol}"
else:
return f"sz{symbol}"
| 18,293 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
_replace_comma
|
(x)
|
去除单元格中的 ","
:param x: 单元格元素
:type x: str
:return: 处理后的值或原值
:rtype: str
|
去除单元格中的 ","
:param x: 单元格元素
:type x: str
:return: 处理后的值或原值
:rtype: str
| 27 | 38 |
def _replace_comma(x):
"""
去除单元格中的 ","
:param x: 单元格元素
:type x: str
:return: 处理后的值或原值
:rtype: str
"""
if "," in str(x):
return str(x).replace(",", "")
else:
return x
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L27-L38
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 66.666667 |
[
8,
9,
11
] | 25 | false | 14.634146 | 12 | 2 | 75 | 5 |
def _replace_comma(x):
if "," in str(x):
return str(x).replace(",", "")
else:
return x
| 18,294 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
get_zh_index_page_count
|
()
|
指数的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 需要抓取的指数的总页数
:rtype: int
|
指数的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 需要抓取的指数的总页数
:rtype: int
| 41 | 53 |
def get_zh_index_page_count() -> int:
"""
指数的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 需要抓取的指数的总页数
:rtype: int
"""
res = requests.get(zh_sina_index_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L41-L53
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 53.846154 |
[
7,
8,
9,
10,
12
] | 38.461538 | false | 14.634146 | 13 | 2 | 61.538462 | 4 |
def get_zh_index_page_count() -> int:
res = requests.get(zh_sina_index_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
| 18,295 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
stock_zh_index_spot
|
()
|
return big_df
|
新浪财经-行情中心首页-A股-分类-所有指数
大量采集会被目标网站服务器封禁 IP, 如果被封禁 IP, 请 10 分钟后再试
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 所有指数的实时行情数据
:rtype: pandas.DataFrame
|
新浪财经-行情中心首页-A股-分类-所有指数
大量采集会被目标网站服务器封禁 IP, 如果被封禁 IP, 请 10 分钟后再试
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 所有指数的实时行情数据
:rtype: pandas.DataFrame
| 56 | 115 |
def stock_zh_index_spot() -> pd.DataFrame:
"""
新浪财经-行情中心首页-A股-分类-所有指数
大量采集会被目标网站服务器封禁 IP, 如果被封禁 IP, 请 10 分钟后再试
http://vip.stock.finance.sina.com.cn/mkt/#hs_s
:return: 所有指数的实时行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_zh_index_page_count()
zh_sina_stock_payload_copy = zh_sina_index_stock_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_stock_payload_copy.update({"page": page})
res = requests.get(zh_sina_index_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df = big_df.applymap(_replace_comma)
big_df["trade"] = big_df["trade"].astype(float)
big_df["pricechange"] = big_df["pricechange"].astype(float)
big_df["changepercent"] = big_df["changepercent"].astype(float)
big_df["buy"] = big_df["buy"].astype(float)
big_df["sell"] = big_df["sell"].astype(float)
big_df["settlement"] = big_df["settlement"].astype(float)
big_df["open"] = big_df["open"].astype(float)
big_df["high"] = big_df["high"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"_",
"_",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
"_",
"_",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L56-L115
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 13.333333 |
[
8,
9,
10,
11,
12,
13,
14,
15,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
44,
59
] | 35 | false | 14.634146 | 60 | 2 | 65 | 5 |
def stock_zh_index_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = get_zh_index_page_count()
zh_sina_stock_payload_copy = zh_sina_index_stock_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_stock_payload_copy.update({"page": page})
res = requests.get(zh_sina_index_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df = big_df.applymap(_replace_comma)
big_df["trade"] = big_df["trade"].astype(float)
big_df["pricechange"] = big_df["pricechange"].astype(float)
big_df["changepercent"] = big_df["changepercent"].astype(float)
big_df["buy"] = big_df["buy"].astype(float)
big_df["sell"] = big_df["sell"].astype(float)
big_df["settlement"] = big_df["settlement"].astype(float)
big_df["open"] = big_df["open"].astype(float)
big_df["high"] = big_df["high"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"_",
"_",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
"_",
"_",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
]
]
return big_df
| 18,296 |
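A minimal usage sketch for stock_zh_index_spot above, assuming the package is installed and the Sina quotes service is reachable; as the docstring warns, heavy polling can get the caller's IP banned for a while:

from akshare.index.index_stock_zh import stock_zh_index_spot

spot_df = stock_zh_index_spot()  # paginated spot quotes for all Sina indexes
print(spot_df.head())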
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
stock_zh_index_daily
|
(symbol: str = "sh000922")
|
return temp_df
|
新浪财经-指数-历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh000909/nc.shtml
:param symbol: sz399998, 指定指数代码
:type symbol: str
:return: 历史行情数据
:rtype: pandas.DataFrame
|
新浪财经-指数-历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh000909/nc.shtml
:param symbol: sz399998, 指定指数代码
:type symbol: str
:return: 历史行情数据
:rtype: pandas.DataFrame
| 118 | 141 |
def stock_zh_index_daily(symbol: str = "sh000922") -> pd.DataFrame:
"""
新浪财经-指数-历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh000909/nc.shtml
:param symbol: sz399998, 指定指数代码
:type symbol: str
:return: 历史行情数据
:rtype: pandas.DataFrame
"""
params = {"d": "2020_2_4"}
res = requests.get(zh_sina_index_stock_hist_url.format(symbol), params=params)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L118-L141
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 37.5 |
[
9,
10,
11,
12,
13,
16,
17,
18,
19,
20,
21,
22,
23
] | 54.166667 | false | 14.634146 | 24 | 1 | 45.833333 | 6 |
def stock_zh_index_daily(symbol: str = "sh000922") -> pd.DataFrame:
params = {"d": "2020_2_4"}
res = requests.get(zh_sina_index_stock_hist_url.format(symbol), params=params)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
return temp_df
| 18,297 |
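A minimal usage sketch for stock_zh_index_daily above, assuming the package is installed and the Sina history endpoint plus the bundled JS decoder still work:

from akshare.index.index_stock_zh import stock_zh_index_daily

daily_df = stock_zh_index_daily(symbol="sh000922")
print(daily_df.tail())  # date, open, close, high, low, volume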
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
_get_tx_start_year
|
(symbol: str = "sh000919")
|
return start_date
|
腾讯证券-获取所有股票数据的第一天, 注意这个数据是腾讯证券的历史数据第一天
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 开始日期
:rtype: pandas.DataFrame
|
腾讯证券-获取所有股票数据的第一天, 注意这个数据是腾讯证券的历史数据第一天
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 开始日期
:rtype: pandas.DataFrame
| 144 | 176 |
def _get_tx_start_year(symbol: str = "sh000919") -> pd.DataFrame:
"""
腾讯证券-获取所有股票数据的第一天, 注意这个数据是腾讯证券的历史数据第一天
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 开始日期
:rtype: pandas.DataFrame
"""
url = "http://web.ifzq.gtimg.cn/other/klineweb/klineWeb/weekTrends"
params = {
"code": symbol,
"type": "qfq",
"_var": "trend_qfq",
"r": "0.3506048543943414",
}
r = requests.get(url, params=params)
data_text = r.text
if not demjson.decode(data_text[data_text.find("={") + 1 :])["data"]:
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
params = {
"_var": "kline_dayqfq",
"param": f"{symbol},day,,,320,qfq",
"r": "0.751892490072597",
}
r = requests.get(url, params=params)
data_text = r.text
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][
symbol
]["day"][0][0]
return start_date
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][0][0]
return start_date
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L144-L176
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 27.272727 |
[
9,
10,
16,
17,
18,
19,
20,
25,
26,
27,
30,
31,
32
] | 39.393939 | false | 14.634146 | 33 | 2 | 60.606061 | 6 |
def _get_tx_start_year(symbol: str = "sh000919") -> pd.DataFrame:
url = "http://web.ifzq.gtimg.cn/other/klineweb/klineWeb/weekTrends"
params = {
"code": symbol,
"type": "qfq",
"_var": "trend_qfq",
"r": "0.3506048543943414",
}
r = requests.get(url, params=params)
data_text = r.text
if not demjson.decode(data_text[data_text.find("={") + 1 :])["data"]:
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
params = {
"_var": "kline_dayqfq",
"param": f"{symbol},day,,,320,qfq",
"r": "0.751892490072597",
}
r = requests.get(url, params=params)
data_text = r.text
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][
symbol
]["day"][0][0]
return start_date
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][0][0]
return start_date
| 18,298 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
stock_zh_index_daily_tx
|
(symbol: str = "sz980017")
|
return temp_df
|
腾讯证券-日频-股票或者指数历史数据
作为 stock_zh_index_daily 的补充, 因为在新浪中有部分指数数据缺失
注意都是: 前复权, 不同网站复权方式不同, 不可混用数据
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票或者指数代码
:type symbol: str
:return: 前复权的股票和指数数据
:rtype: pandas.DataFrame
|
腾讯证券-日频-股票或者指数历史数据
作为 stock_zh_index_daily 的补充, 因为在新浪中有部分指数数据缺失
注意都是: 前复权, 不同网站复权方式不同, 不可混用数据
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票或者指数代码
:type symbol: str
:return: 前复权的股票和指数数据
:rtype: pandas.DataFrame
| 179 | 224 |
def stock_zh_index_daily_tx(symbol: str = "sz980017") -> pd.DataFrame:
"""
腾讯证券-日频-股票或者指数历史数据
作为 stock_zh_index_daily 的补充, 因为在新浪中有部分指数数据缺失
注意都是: 前复权, 不同网站复权方式不同, 不可混用数据
http://gu.qq.com/sh000919/zs
:param symbol: 带市场标识的股票或者指数代码
:type symbol: str
:return: 前复权的股票和指数数据
:rtype: pandas.DataFrame
"""
start_date = _get_tx_start_year(symbol=symbol)
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
range_start = int(start_date.split("-")[0])
range_end = datetime.date.today().year + 1
temp_df = pd.DataFrame()
for year in tqdm(range(range_start, range_end)):
params = {
"_var": "kline_dayqfq",
"param": f"{symbol},day,{year}-01-01,{year + 1}-12-31,640,qfq",
"r": "0.8205512681390605",
}
res = requests.get(url, params=params)
text = res.text
try:
inner_temp_df = pd.DataFrame(
demjson.decode(text[text.find("={") + 1 :])["data"][symbol]["day"]
)
except:
inner_temp_df = pd.DataFrame(
demjson.decode(text[text.find("={") + 1 :])["data"][symbol]["qfqday"]
)
temp_df = pd.concat([temp_df, inner_temp_df], ignore_index=True)
if temp_df.shape[1] == 6:
temp_df.columns = ["date", "open", "close", "high", "low", "amount"]
else:
temp_df = temp_df.iloc[:, :6]
temp_df.columns = ["date", "open", "close", "high", "low", "amount"]
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df.drop_duplicates(inplace=True, ignore_index=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L179-L224
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 23.913043 |
[
11,
12,
13,
14,
15,
16,
17,
22,
23,
24,
25,
28,
29,
32,
33,
34,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45
] | 56.521739 | false | 14.634146 | 46 | 4 | 43.478261 | 8 |
def stock_zh_index_daily_tx(symbol: str = "sz980017") -> pd.DataFrame:
start_date = _get_tx_start_year(symbol=symbol)
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
range_start = int(start_date.split("-")[0])
range_end = datetime.date.today().year + 1
temp_df = pd.DataFrame()
for year in tqdm(range(range_start, range_end)):
params = {
"_var": "kline_dayqfq",
"param": f"{symbol},day,{year}-01-01,{year + 1}-12-31,640,qfq",
"r": "0.8205512681390605",
}
res = requests.get(url, params=params)
text = res.text
try:
inner_temp_df = pd.DataFrame(
demjson.decode(text[text.find("={") + 1 :])["data"][symbol]["day"]
)
except:
inner_temp_df = pd.DataFrame(
demjson.decode(text[text.find("={") + 1 :])["data"][symbol]["qfqday"]
)
temp_df = pd.concat([temp_df, inner_temp_df], ignore_index=True)
if temp_df.shape[1] == 6:
temp_df.columns = ["date", "open", "close", "high", "low", "amount"]
else:
temp_df = temp_df.iloc[:, :6]
temp_df.columns = ["date", "open", "close", "high", "low", "amount"]
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df.drop_duplicates(inplace=True, ignore_index=True)
return temp_df
| 18,299 |
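A minimal usage sketch for stock_zh_index_daily_tx above, assuming the package is installed and the Tencent kline endpoint still responds; note the values are 前复权 and should not be mixed with data from the Sina-based function:

from akshare.index.index_stock_zh import stock_zh_index_daily_tx

tx_df = stock_zh_index_daily_tx(symbol="sz980017")
print(tx_df.tail())  # date, open, close, high, low, amount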
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_stock_zh.py
|
stock_zh_index_daily_em
|
(symbol: str = "sh000913")
|
return temp_df
|
东方财富网-股票指数数据
https://quote.eastmoney.com/center/hszs.html
:param symbol: 带市场标识的指数代码
:type symbol: str
:return: 指数数据
:rtype: pandas.DataFrame
|
东方财富网-股票指数数据
https://quote.eastmoney.com/center/hszs.html
:param symbol: 带市场标识的指数代码
:type symbol: str
:return: 指数数据
:rtype: pandas.DataFrame
| 227 | 263 |
def stock_zh_index_daily_em(symbol: str = "sh000913") -> pd.DataFrame:
"""
东方财富网-股票指数数据
https://quote.eastmoney.com/center/hszs.html
:param symbol: 带市场标识的指数代码
:type symbol: str
:return: 指数数据
:rtype: pandas.DataFrame
"""
market_map = {"sz": "0", "sh": "1"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"cb": "jQuery1124033485574041163946_1596700547000",
"secid": f"{market_map[symbol[:2]]}.{symbol[2:]}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"klt": "101", # 日频率
"fqt": "0",
"beg": "19900101",
"end": "20320101",
"_": "1596700547039",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -2])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = ["date", "open", "close", "high", "low", "volume", "amount", "_"]
temp_df = temp_df[["date", "open", "close", "high", "low", "volume", "amount"]]
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_stock_zh.py#L227-L263
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 24.324324 |
[
9,
10,
11,
23,
24,
25,
26,
27,
28,
30,
31,
32,
33,
34,
35,
36
] | 43.243243 | false | 14.634146 | 37 | 2 | 56.756757 | 6 |
def stock_zh_index_daily_em(symbol: str = "sh000913") -> pd.DataFrame:
market_map = {"sz": "0", "sh": "1"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"cb": "jQuery1124033485574041163946_1596700547000",
"secid": f"{market_map[symbol[:2]]}.{symbol[2:]}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"klt": "101", # 日频率
"fqt": "0",
"beg": "19900101",
"end": "20320101",
"_": "1596700547039",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -2])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = ["date", "open", "close", "high", "low", "volume", "amount", "_"]
temp_df = temp_df[["date", "open", "close", "high", "low", "volume", "amount"]]
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
return temp_df
| 18,300 |
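A minimal usage sketch for stock_zh_index_daily_em above, assuming the package is installed and the Eastmoney kline endpoint is reachable; only sh/sz prefixed codes are mapped by market_map:

from akshare.index.index_stock_zh import stock_zh_index_daily_em

em_df = stock_zh_index_daily_em(symbol="sh000913")
print(em_df.tail())  # date, open, close, high, low, volume, amount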
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
nested_to_record
|
(
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: Optional[int] = None,
)
|
return new_ds
| 15 | 58 |
def nested_to_record(
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: Optional[int] = None,
):
"""
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, str):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# flatten if type is dict and
# current dict level < maximum level provided and
# only dicts gets recurse-flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict) or (
max_level is not None and level >= max_level
):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L15-L58
| 25 |
[
0
] | 2.272727 |
[
9,
10,
11,
12,
13,
14,
15,
16,
18,
19,
20,
21,
23,
29,
32,
33,
34,
35,
37,
38,
39,
41,
42,
43
] | 54.545455 | false | 14.444444 | 44 | 11 | 45.454545 | 0 |
def nested_to_record(
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: Optional[int] = None,
):
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, str):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# flatten if type is dict and
# current dict level < maximum level provided and
# only dicts gets recurse-flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict) or (
max_level is not None and level >= max_level
):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
| 18,301 |
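A small worked example for nested_to_record above; it flattens nested dictionaries into dotted keys and needs no network access, assuming only that akshare/index/request.py is importable as recorded:

from akshare.index.request import nested_to_record

record = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
flat = nested_to_record(record, sep=".")
print(flat)  # {'a': 1, 'b.c': 2, 'b.d.e': 3}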
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.__init__
|
(
self,
hl="en-US",
tz=360,
geo="",
timeout=(2, 5),
proxies="",
retries=0,
backoff_factor=0,
)
|
Initialize default values for params
|
Initialize default values for params
| 84 | 116 |
def __init__(
self,
hl="en-US",
tz=360,
geo="",
timeout=(2, 5),
proxies="",
retries=0,
backoff_factor=0,
):
"""
Initialize default values for params
"""
# google rate limit
self.google_rl = "You have reached your quota limit. Please try again later."
self.results = None
# set user defined options used globally
self.tz = tz
self.hl = hl
self.geo = geo
self.kw_list = list()
self.timeout = timeout
self.proxies = proxies # add a proxy option
self.retries = retries
self.backoff_factor = backoff_factor
self.proxy_index = 0
self.cookies = self.GetGoogleCookie()
        # initialize widget payloads
self.token_payload = dict()
self.interest_over_time_widget = dict()
self.interest_by_region_widget = dict()
self.related_topics_widget_list = list()
self.related_queries_widget_list = list()
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L84-L116
| 25 |
[
0
] | 3.030303 |
[
14,
15,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
28,
29,
30,
31,
32
] | 51.515152 | false | 14.444444 | 33 | 1 | 48.484848 | 1 |
def __init__(
self,
hl="en-US",
tz=360,
geo="",
timeout=(2, 5),
proxies="",
retries=0,
backoff_factor=0,
):
# google rate limit
self.google_rl = "You have reached your quota limit. Please try again later."
self.results = None
# set user defined options used globally
self.tz = tz
self.hl = hl
self.geo = geo
self.kw_list = list()
self.timeout = timeout
self.proxies = proxies # add a proxy option
self.retries = retries
self.backoff_factor = backoff_factor
self.proxy_index = 0
self.cookies = self.GetGoogleCookie()
        # initialize widget payloads
self.token_payload = dict()
self.interest_over_time_widget = dict()
self.interest_by_region_widget = dict()
self.related_topics_widget_list = list()
self.related_queries_widget_list = list()
| 18,302 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.GetGoogleCookie
|
(self)
|
Gets google cookie (used for each and every proxy; once on init otherwise)
Removes proxy from the list on proxy error
|
Gets google cookie (used for each and every proxy; once on init otherwise)
Removes proxy from the list on proxy error
| 118 | 147 |
def GetGoogleCookie(self):
"""
Gets google cookie (used for each and every proxy; once on init otherwise)
Removes proxy from the list on proxy error
"""
while True:
if len(self.proxies) > 0:
proxy = {"https": self.proxies[self.proxy_index]}
else:
proxy = ""
try:
return dict(
filter(
lambda i: i[0] == "NID",
requests.get(
"https://trends.google.com/?geo={geo}".format(
geo=self.hl[-2:]
),
timeout=self.timeout,
proxies=proxy,
).cookies.items(),
)
)
except requests.exceptions.ProxyError:
print("Proxy error. Changing IP")
if len(self.proxies) > 0:
self.proxies.remove(self.proxies[self.proxy_index])
else:
print("Proxy list is empty. Bye!")
continue
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L118-L147
| 25 |
[
0,
1,
2,
3,
4
] | 16.666667 |
[
5,
6,
7,
9,
10,
11,
23,
24,
25,
26,
28,
29
] | 40 | false | 14.444444 | 30 | 5 | 60 | 2 |
def GetGoogleCookie(self):
while True:
if len(self.proxies) > 0:
proxy = {"https": self.proxies[self.proxy_index]}
else:
proxy = ""
try:
return dict(
filter(
lambda i: i[0] == "NID",
requests.get(
"https://trends.google.com/?geo={geo}".format(
geo=self.hl[-2:]
),
timeout=self.timeout,
proxies=proxy,
).cookies.items(),
)
)
except requests.exceptions.ProxyError:
print("Proxy error. Changing IP")
if len(self.proxies) > 0:
self.proxies.remove(self.proxies[self.proxy_index])
else:
print("Proxy list is empty. Bye!")
continue
| 18,303 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.GetNewProxy
|
(self)
|
Increment proxy INDEX; zero on overflow
|
Increment proxy INDEX; zero on overflow
| 149 | 156 |
def GetNewProxy(self):
"""
Increment proxy INDEX; zero on overflow
"""
if self.proxy_index < (len(self.proxies) - 1):
self.proxy_index += 1
else:
self.proxy_index = 0
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L149-L156
| 25 |
[
0,
1,
2,
3
] | 50 |
[
4,
5,
7
] | 37.5 | false | 14.444444 | 8 | 2 | 62.5 | 1 |
def GetNewProxy(self):
if self.proxy_index < (len(self.proxies) - 1):
self.proxy_index += 1
else:
self.proxy_index = 0
| 18,304 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq._get_data
|
(self, url, method=GET_METHOD, trim_chars=0, **kwargs)
|
Send a request to Google and return the JSON response as a Python object
:param url: the url to which the request will be sent
:param method: the HTTP method ('get' or 'post')
:param trim_chars: how many characters should be trimmed off the beginning of the content of the response
before this is passed to the JSON parser
:param kwargs: any extra key arguments passed to the request builder (usually query parameters or data)
:return:
|
Send a request to Google and return the JSON response as a Python object
:param url: the url to which the request will be sent
:param method: the HTTP method ('get' or 'post')
:param trim_chars: how many characters should be trimmed off the beginning of the content of the response
before this is passed to the JSON parser
:param kwargs: any extra key arguments passed to the request builder (usually query parameters or data)
:return:
| 158 | 212 |
def _get_data(self, url, method=GET_METHOD, trim_chars=0, **kwargs):
"""Send a request to Google and return the JSON response as a Python object
:param url: the url to which the request will be sent
:param method: the HTTP method ('get' or 'post')
:param trim_chars: how many characters should be trimmed off the beginning of the content of the response
before this is passed to the JSON parser
:param kwargs: any extra key arguments passed to the request builder (usually query parameters or data)
:return:
"""
s = requests.session()
# Retries mechanism. Activated when one of statements >0 (best used for proxy)
if self.retries > 0 or self.backoff_factor > 0:
retry = Retry(
total=self.retries,
read=self.retries,
connect=self.retries,
backoff_factor=self.backoff_factor,
)
s.headers.update({"accept-language": self.hl})
if len(self.proxies) > 0:
self.cookies = self.GetGoogleCookie()
s.proxies.update({"https": self.proxies[self.proxy_index]})
if method == TrendReq.POST_METHOD:
response = s.post(
url, timeout=self.timeout, cookies=self.cookies, **kwargs
) # DO NOT USE retries or backoff_factor here
else:
response = s.get(
url, timeout=self.timeout, cookies=self.cookies, **kwargs
) # DO NOT USE retries or backoff_factor here
# check if the response contains json and throw an exception otherwise
# Google mostly sends 'application/json' in the Content-Type header,
# but occasionally it sends 'application/javascript
# and sometimes even 'text/javascript
if (
response.status_code == 200
and "application/json" in response.headers["Content-Type"]
or "application/javascript" in response.headers["Content-Type"]
or "text/javascript" in response.headers["Content-Type"]
):
# trim initial characters
# some responses start with garbage characters, like ")]}',"
# these have to be cleaned before being passed to the json parser
content = response.text[trim_chars:]
# parse json
self.GetNewProxy()
return json.loads(content)
else:
# error
raise exceptions.ResponseError(
"The request failed: Google returned a "
"response with code {0}.".format(response.status_code),
response=response,
)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L158-L212
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.363636 |
[
9,
11,
12,
19,
20,
21,
22,
23,
24,
28,
35,
44,
46,
47,
50
] | 27.272727 | false | 14.444444 | 55 | 9 | 72.727273 | 7 |
def _get_data(self, url, method=GET_METHOD, trim_chars=0, **kwargs):
s = requests.session()
# Retries mechanism. Activated when one of statements >0 (best used for proxy)
if self.retries > 0 or self.backoff_factor > 0:
retry = Retry(
total=self.retries,
read=self.retries,
connect=self.retries,
backoff_factor=self.backoff_factor,
)
s.headers.update({"accept-language": self.hl})
if len(self.proxies) > 0:
self.cookies = self.GetGoogleCookie()
s.proxies.update({"https": self.proxies[self.proxy_index]})
if method == TrendReq.POST_METHOD:
response = s.post(
url, timeout=self.timeout, cookies=self.cookies, **kwargs
) # DO NOT USE retries or backoff_factor here
else:
response = s.get(
url, timeout=self.timeout, cookies=self.cookies, **kwargs
) # DO NOT USE retries or backoff_factor here
# check if the response contains json and throw an exception otherwise
# Google mostly sends 'application/json' in the Content-Type header,
# but occasionally it sends 'application/javascript
# and sometimes even 'text/javascript
if (
response.status_code == 200
and "application/json" in response.headers["Content-Type"]
or "application/javascript" in response.headers["Content-Type"]
or "text/javascript" in response.headers["Content-Type"]
):
# trim initial characters
# some responses start with garbage characters, like ")]}',"
# these have to be cleaned before being passed to the json parser
content = response.text[trim_chars:]
# parse json
self.GetNewProxy()
return json.loads(content)
else:
# error
raise exceptions.ResponseError(
"The request failed: Google returned a "
"response with code {0}.".format(response.status_code),
response=response,
)
| 18,305 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.build_payload
|
(self, kw_list, cat=0, timeframe="today 5-y", geo="", gprop="")
|
return
|
Create the payload for related queries, interest over time and interest by region
|
Create the payload for related queries, interest over time and interest by region
| 214 | 232 |
def build_payload(self, kw_list, cat=0, timeframe="today 5-y", geo="", gprop=""):
"""Create the payload for related queries, interest over time and interest by region"""
self.kw_list = kw_list
self.geo = geo or self.geo
self.token_payload = {
"hl": self.hl,
"tz": self.tz,
"req": {"comparisonItem": [], "category": cat, "property": gprop},
}
# build out json for each keyword
for kw in self.kw_list:
keyword_payload = {"keyword": kw, "time": timeframe, "geo": self.geo}
self.token_payload["req"]["comparisonItem"].append(keyword_payload)
# requests will mangle this if it is not a string
self.token_payload["req"] = json.dumps(self.token_payload["req"])
# get tokens
self._tokens()
return
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L214-L232
| 25 |
[
0,
1
] | 10.526316 |
[
2,
3,
4,
11,
12,
13,
15,
17,
18
] | 47.368421 | false | 14.444444 | 19 | 3 | 52.631579 | 1 |
def build_payload(self, kw_list, cat=0, timeframe="today 5-y", geo="", gprop=""):
self.kw_list = kw_list
self.geo = geo or self.geo
self.token_payload = {
"hl": self.hl,
"tz": self.tz,
"req": {"comparisonItem": [], "category": cat, "property": gprop},
}
# build out json for each keyword
for kw in self.kw_list:
keyword_payload = {"keyword": kw, "time": timeframe, "geo": self.geo}
self.token_payload["req"]["comparisonItem"].append(keyword_payload)
# requests will mangle this if it is not a string
self.token_payload["req"] = json.dumps(self.token_payload["req"])
# get tokens
self._tokens()
return
| 18,306 |
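A minimal sketch of the typical TrendReq flow implied by the records above (instantiate, build_payload to fetch tokens, then request a widget); it assumes the class is importable from akshare/index/request.py as recorded and that Google Trends is reachable and not rate limiting the caller:

from akshare.index.request import TrendReq

trend = TrendReq(hl="en-US", tz=360)
trend.build_payload(kw_list=["python"], timeframe="today 5-y", geo="")
iot_df = trend.interest_over_time()  # DataFrame indexed by date, one column per keyword
print(iot_df.tail())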
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq._tokens
|
(self)
|
return
|
Makes request to Google to get API tokens for interest over time, interest by region and related queries
|
Makes request to Google to get API tokens for interest over time, interest by region and related queries
| 234 | 261 |
def _tokens(self):
"""Makes request to Google to get API tokens for interest over time, interest by region and related queries"""
# make the request and parse the returned json
widget_dict = self._get_data(
url=TrendReq.GENERAL_URL,
method=TrendReq.GET_METHOD,
params=self.token_payload,
trim_chars=4,
)["widgets"]
# order of the json matters...
first_region_token = True
# clear self.related_queries_widget_list and self.related_topics_widget_list
# of old keywords'widgets
self.related_queries_widget_list[:] = []
self.related_topics_widget_list[:] = []
# assign requests
for widget in widget_dict:
if widget["id"] == "TIMESERIES":
self.interest_over_time_widget = widget
if widget["id"] == "GEO_MAP" and first_region_token:
self.interest_by_region_widget = widget
first_region_token = False
# response for each term, put into a list
if "RELATED_TOPICS" in widget["id"]:
self.related_topics_widget_list.append(widget)
if "RELATED_QUERIES" in widget["id"]:
self.related_queries_widget_list.append(widget)
return
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L234-L261
| 25 |
[
0,
1,
2
] | 10.714286 |
[
3,
10,
13,
14,
16,
17,
18,
19,
20,
21,
23,
24,
25,
26,
27
] | 53.571429 | false | 14.444444 | 28 | 7 | 46.428571 | 1 |
def _tokens(self):
# make the request and parse the returned json
widget_dict = self._get_data(
url=TrendReq.GENERAL_URL,
method=TrendReq.GET_METHOD,
params=self.token_payload,
trim_chars=4,
)["widgets"]
# order of the json matters...
first_region_token = True
# clear self.related_queries_widget_list and self.related_topics_widget_list
# of old keywords'widgets
self.related_queries_widget_list[:] = []
self.related_topics_widget_list[:] = []
# assign requests
for widget in widget_dict:
if widget["id"] == "TIMESERIES":
self.interest_over_time_widget = widget
if widget["id"] == "GEO_MAP" and first_region_token:
self.interest_by_region_widget = widget
first_region_token = False
# response for each term, put into a list
if "RELATED_TOPICS" in widget["id"]:
self.related_topics_widget_list.append(widget)
if "RELATED_QUERIES" in widget["id"]:
self.related_queries_widget_list.append(widget)
return
| 18,307 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.interest_over_time
|
(self)
|
return final
|
Request data from Google's Interest Over Time section and return a dataframe
|
Request data from Google's Interest Over Time section and return a dataframe
| 263 | 312 |
def interest_over_time(self):
"""Request data from Google's Interest Over Time section and return a dataframe"""
over_time_payload = {
# convert to string as requests will mangle
"req": json.dumps(self.interest_over_time_widget["request"]),
"token": self.interest_over_time_widget["token"],
"tz": self.tz,
}
# make the request and parse the returned json
req_json = self._get_data(
url=TrendReq.INTEREST_OVER_TIME_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=over_time_payload,
)
df = pd.DataFrame(req_json["default"]["timelineData"])
if df.empty:
return df
df["date"] = pd.to_datetime(df["time"].astype(dtype="float64"), unit="s")
df = df.set_index(["date"]).sort_index()
# split list columns into separate ones, remove brackets and split on comma
result_df = df["value"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
# rename each column with its search term, relying on order that google provides...
for idx, kw in enumerate(self.kw_list):
# there is currently a bug with assigning columns that may be
# parsed as a date in pandas: use explicit insert column method
result_df.insert(len(result_df.columns), kw, result_df[idx].astype("int"))
del result_df[idx]
if "isPartial" in df:
# make other dataframe from isPartial key data
# split list columns into separate ones, remove brackets and split on comma
df = df.fillna(False)
result_df2 = df["isPartial"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
result_df2.columns = ["isPartial"]
# concatenate the two dataframes
final = pd.concat([result_df, result_df2], axis=1)
else:
final = result_df
final["isPartial"] = False
return final
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L263-L312
| 25 |
[
0,
1,
2
] | 6 |
[
3,
11,
18,
19,
20,
22,
23,
25,
29,
32,
33,
35,
38,
39,
42,
44,
46,
47,
49
] | 38 | false | 14.444444 | 50 | 4 | 62 | 1 |
def interest_over_time(self):
over_time_payload = {
# convert to string as requests will mangle
"req": json.dumps(self.interest_over_time_widget["request"]),
"token": self.interest_over_time_widget["token"],
"tz": self.tz,
}
# make the request and parse the returned json
req_json = self._get_data(
url=TrendReq.INTEREST_OVER_TIME_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=over_time_payload,
)
df = pd.DataFrame(req_json["default"]["timelineData"])
if df.empty:
return df
df["date"] = pd.to_datetime(df["time"].astype(dtype="float64"), unit="s")
df = df.set_index(["date"]).sort_index()
# split list columns into separate ones, remove brackets and split on comma
result_df = df["value"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
# rename each column with its search term, relying on order that google provides...
for idx, kw in enumerate(self.kw_list):
# there is currently a bug with assigning columns that may be
# parsed as a date in pandas: use explicit insert column method
result_df.insert(len(result_df.columns), kw, result_df[idx].astype("int"))
del result_df[idx]
if "isPartial" in df:
# make other dataframe from isPartial key data
# split list columns into separate ones, remove brackets and split on comma
df = df.fillna(False)
result_df2 = df["isPartial"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
result_df2.columns = ["isPartial"]
# concatenate the two dataframes
final = pd.concat([result_df, result_df2], axis=1)
else:
final = result_df
final["isPartial"] = False
return final
| 18,308 |
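interest_over_time turns the timelineData payload into one integer column per keyword plus an isPartial flag, indexed by a datetime "date" index. A short consumption sketch, reusing the pytrends instance and payload built in the build_payload sketch above:

df = pytrends.interest_over_time()
if not df.empty:
    # keep only the keyword series; isPartial marks the possibly incomplete last row
    scores = df.drop(columns=["isPartial"])
    weekly_mean = scores.resample("W").mean()  # valid because the index is a DatetimeIndex
    print(weekly_mean.tail())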
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.interest_by_region
|
(
self, resolution="COUNTRY", inc_low_vol=False, inc_geo_code=False
)
|
return result_df
|
Request data from Google's Interest by Region section and return a dataframe
|
Request data from Google's Interest by Region section and return a dataframe
| 314 | 360 |
def interest_by_region(
self, resolution="COUNTRY", inc_low_vol=False, inc_geo_code=False
):
"""Request data from Google's Interest by Region section and return a dataframe"""
# make the request
region_payload = dict()
if self.geo == "":
self.interest_by_region_widget["request"]["resolution"] = resolution
elif self.geo == "US" and resolution in ["DMA", "CITY", "REGION"]:
self.interest_by_region_widget["request"]["resolution"] = resolution
self.interest_by_region_widget["request"][
"includeLowSearchVolumeGeos"
] = inc_low_vol
# convert to string as requests will mangle
region_payload["req"] = json.dumps(self.interest_by_region_widget["request"])
region_payload["token"] = self.interest_by_region_widget["token"]
region_payload["tz"] = self.tz
# parse returned json
req_json = self._get_data(
url=TrendReq.INTEREST_BY_REGION_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=region_payload,
)
df = pd.DataFrame(req_json["default"]["geoMapData"])
if df.empty:
return df
# rename the column with the search keyword
df = df[["geoName", "geoCode", "value"]].set_index(["geoName"]).sort_index()
# split list columns into separate ones, remove brackets and split on comma
result_df = df["value"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
if inc_geo_code:
result_df["geoCode"] = df["geoCode"]
# rename each column with its search term
for idx, kw in enumerate(self.kw_list):
result_df[kw] = result_df[idx].astype("int")
del result_df[idx]
return result_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L314-L360
| 25 |
[
0
] | 2.12766 |
[
6,
7,
8,
9,
10,
12,
17,
18,
19,
22,
28,
29,
30,
33,
35,
38,
39,
42,
43,
44,
46
] | 44.680851 | false | 14.444444 | 47 | 7 | 55.319149 | 1 |
def interest_by_region(
self, resolution="COUNTRY", inc_low_vol=False, inc_geo_code=False
):
# make the request
region_payload = dict()
if self.geo == "":
self.interest_by_region_widget["request"]["resolution"] = resolution
elif self.geo == "US" and resolution in ["DMA", "CITY", "REGION"]:
self.interest_by_region_widget["request"]["resolution"] = resolution
self.interest_by_region_widget["request"][
"includeLowSearchVolumeGeos"
] = inc_low_vol
# convert to string as requests will mangle
region_payload["req"] = json.dumps(self.interest_by_region_widget["request"])
region_payload["token"] = self.interest_by_region_widget["token"]
region_payload["tz"] = self.tz
# parse returned json
req_json = self._get_data(
url=TrendReq.INTEREST_BY_REGION_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=region_payload,
)
df = pd.DataFrame(req_json["default"]["geoMapData"])
if df.empty:
return df
# rename the column with the search keyword
df = df[["geoName", "geoCode", "value"]].set_index(["geoName"]).sort_index()
# split list columns into separate ones, remove brackets and split on comma
result_df = df["value"].apply(
lambda x: pd.Series(str(x).replace("[", "").replace("]", "").split(","))
)
if inc_geo_code:
result_df["geoCode"] = df["geoCode"]
# rename each column with its search term
for idx, kw in enumerate(self.kw_list):
result_df[kw] = result_df[idx].astype("int")
del result_df[idx]
return result_df
| 18,309 |
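interest_by_region indexes the result by geoName, keeps geoCode only when inc_geo_code=True, and names the value columns after the keywords passed to build_payload. A hedged sketch ranking regions for one keyword from the earlier payload:

region_df = pytrends.interest_by_region(resolution="COUNTRY", inc_low_vol=True, inc_geo_code=True)
if not region_df.empty:
    # columns are the payload keywords, values are 0-100 interest scores
    top10 = region_df.sort_values("bitcoin", ascending=False).head(10)
    print(top10[["geoCode", "bitcoin"]])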
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.related_topics
|
(self)
|
return result_dict
|
Request data from Google's Related Topics section and return a dictionary of dataframes
If no top and/or rising related topics are found, the value for the key "top" and/or "rising" will be None
|
Request data from Google's Related Topics section and return a dictionary of dataframes
| 362 | 408 |
def related_topics(self):
"""Request data from Google's Related Topics section and return a dictionary of dataframes
If no top and/or rising related topics are found, the value for the key "top" and/or "rising" will be None
"""
# make the request
related_payload = dict()
result_dict = dict()
for request_json in self.related_topics_widget_list:
# ensure we know which keyword we are looking at rather than relying on order
kw = request_json["request"]["restriction"]["complexKeywordsRestriction"][
"keyword"
][0]["value"]
# convert to string as requests will mangle
related_payload["req"] = json.dumps(request_json["request"])
related_payload["token"] = request_json["token"]
related_payload["tz"] = self.tz
# parse the returned json
req_json = self._get_data(
url=TrendReq.RELATED_QUERIES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=related_payload,
)
# top topics
try:
top_list = req_json["default"]["rankedList"][0]["rankedKeyword"]
df_top = pd.DataFrame([nested_to_record(d, sep="_") for d in top_list])
except KeyError:
# in case no top topics are found, the lines above will throw a KeyError
df_top = None
# rising topics
try:
rising_list = req_json["default"]["rankedList"][1]["rankedKeyword"]
df_rising = pd.DataFrame(
[nested_to_record(d, sep="_") for d in rising_list]
)
except KeyError:
# in case no rising topics are found, the lines above will throw a KeyError
df_rising = None
result_dict[kw] = {"rising": df_rising, "top": df_top}
return result_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L362-L408
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14.893617 |
[
7,
8,
9,
11,
15,
16,
17,
20,
28,
29,
30,
31,
33,
36,
37,
38,
41,
43,
45,
46
] | 42.553191 | false | 14.444444 | 47 | 6 | 57.446809 | 3 |
def related_topics(self):
# make the request
related_payload = dict()
result_dict = dict()
for request_json in self.related_topics_widget_list:
# ensure we know which keyword we are looking at rather than relying on order
kw = request_json["request"]["restriction"]["complexKeywordsRestriction"][
"keyword"
][0]["value"]
# convert to string as requests will mangle
related_payload["req"] = json.dumps(request_json["request"])
related_payload["token"] = request_json["token"]
related_payload["tz"] = self.tz
# parse the returned json
req_json = self._get_data(
url=TrendReq.RELATED_QUERIES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=related_payload,
)
# top topics
try:
top_list = req_json["default"]["rankedList"][0]["rankedKeyword"]
df_top = pd.DataFrame([nested_to_record(d, sep="_") for d in top_list])
except KeyError:
# in case no top topics are found, the lines above will throw a KeyError
df_top = None
# rising topics
try:
rising_list = req_json["default"]["rankedList"][1]["rankedKeyword"]
df_rising = pd.DataFrame(
[nested_to_record(d, sep="_") for d in rising_list]
)
except KeyError:
# in case no rising topics are found, the lines above will throw a KeyError
df_rising = None
result_dict[kw] = {"rising": df_rising, "top": df_top}
return result_dict
| 18,310 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.related_queries
|
(self)
|
return result_dict
|
Request data from Google's Related Queries section and return a dictionary of dataframes
If no top and/or rising related queries are found, the value for the key "top" and/or "rising" will be None
|
Request data from Google's Related Queries section and return a dictionary of dataframes
| 410 | 458 |
def related_queries(self):
"""Request data from Google's Related Queries section and return a dictionary of dataframes
If no top and/or rising related queries are found, the value for the key "top" and/or "rising" will be None
"""
# make the request
related_payload = dict()
result_dict = dict()
for request_json in self.related_queries_widget_list:
# ensure we know which keyword we are looking at rather than relying on order
kw = request_json["request"]["restriction"]["complexKeywordsRestriction"][
"keyword"
][0]["value"]
# convert to string as requests will mangle
related_payload["req"] = json.dumps(request_json["request"])
related_payload["token"] = request_json["token"]
related_payload["tz"] = self.tz
# parse the returned json
req_json = self._get_data(
url=TrendReq.RELATED_QUERIES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=related_payload,
)
# top queries
try:
top_df = pd.DataFrame(
req_json["default"]["rankedList"][0]["rankedKeyword"]
)
top_df = top_df[["query", "value"]]
except KeyError:
# in case no top queries are found, the lines above will throw a KeyError
top_df = None
# rising queries
try:
rising_df = pd.DataFrame(
req_json["default"]["rankedList"][1]["rankedKeyword"]
)
rising_df = rising_df[["query", "value"]]
except KeyError:
# in case no rising queries are found, the lines above will throw a KeyError
rising_df = None
result_dict[kw] = {"top": top_df, "rising": rising_df}
return result_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L410-L458
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14.285714 |
[
7,
8,
9,
11,
15,
16,
17,
20,
28,
29,
32,
33,
35,
38,
39,
42,
43,
45,
47,
48
] | 40.816327 | false | 14.444444 | 49 | 4 | 59.183673 | 3 |
def related_queries(self):
# make the request
related_payload = dict()
result_dict = dict()
for request_json in self.related_queries_widget_list:
# ensure we know which keyword we are looking at rather than relying on order
kw = request_json["request"]["restriction"]["complexKeywordsRestriction"][
"keyword"
][0]["value"]
# convert to string as requests will mangle
related_payload["req"] = json.dumps(request_json["request"])
related_payload["token"] = request_json["token"]
related_payload["tz"] = self.tz
# parse the returned json
req_json = self._get_data(
url=TrendReq.RELATED_QUERIES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=related_payload,
)
# top queries
try:
top_df = pd.DataFrame(
req_json["default"]["rankedList"][0]["rankedKeyword"]
)
top_df = top_df[["query", "value"]]
except KeyError:
# in case no top queries are found, the lines above will throw a KeyError
top_df = None
# rising queries
try:
rising_df = pd.DataFrame(
req_json["default"]["rankedList"][1]["rankedKeyword"]
)
rising_df = rising_df[["query", "value"]]
except KeyError:
# in case no rising queries are found, the lines above will throw a KeyError
rising_df = None
result_dict[kw] = {"top": top_df, "rising": rising_df}
return result_dict
| 18,311 |
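related_topics and related_queries share the same shape: a dict keyed by keyword whose values hold "top" and "rising" frames, either of which may be None when Google returns nothing. A defensive access sketch, continuing from the earlier payload:

queries = pytrends.related_queries()
topics = pytrends.related_topics()
for kw in ["bitcoin", "ethereum"]:
    top_q = queries.get(kw, {}).get("top")       # columns: query, value
    if top_q is not None:
        print(kw, top_q.head(3).to_dict("records"))
    rising_t = topics.get(kw, {}).get("rising")  # flattened topic records
    if rising_t is not None:
        print(kw, "rising topics:", len(rising_t))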
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.trending_searches
|
(self, pn="united_states")
|
return result_df
|
Request data from Google's Hot Searches section and return a dataframe
|
Request data from Google's Hot Searches section and return a dataframe
| 460 | 470 |
def trending_searches(self, pn="united_states"):
"""Request data from Google's Hot Searches section and return a dataframe"""
# make the request
        # forms become obsolete due to the new TRENDING_SEARCHES_URL
# forms = {'ajax': 1, 'pn': pn, 'htd': '', 'htv': 'l'}
req_json = self._get_data(
url=TrendReq.TRENDING_SEARCHES_URL, method=TrendReq.GET_METHOD
)[pn]
result_df = pd.DataFrame(req_json)
return result_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L460-L470
| 25 |
[
0,
1,
2,
3,
4,
5
] | 54.545455 |
[
6,
9,
10
] | 27.272727 | false | 14.444444 | 11 | 1 | 72.727273 | 1 |
def trending_searches(self, pn="united_states"):
# make the request
        # forms become obsolete due to the new TRENDING_SEARCHES_URL
# forms = {'ajax': 1, 'pn': pn, 'htd': '', 'htv': 'l'}
req_json = self._get_data(
url=TrendReq.TRENDING_SEARCHES_URL, method=TrendReq.GET_METHOD
)[pn]
result_df = pd.DataFrame(req_json)
return result_df
| 18,312 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.today_searches
|
(self, pn="US")
|
return result_df.iloc[:, -1]
|
Request data from Google Daily Trends section and returns a dataframe
|
Request data from Google Daily Trends section and returns a dataframe
| 472 | 487 |
def today_searches(self, pn="US"):
"""Request data from Google Daily Trends section and returns a dataframe"""
forms = {"ns": 15, "geo": pn, "tz": "-180", "hl": "en-US"}
req_json = self._get_data(
url=TrendReq.TODAY_SEARCHES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=forms,
)["default"]["trendingSearchesDays"][0]["trendingSearches"]
result_df = pd.DataFrame()
# parse the returned json
sub_df = pd.DataFrame()
for trend in req_json:
sub_df = sub_df.append(trend["title"], ignore_index=True)
result_df = pd.concat([result_df, sub_df])
return result_df.iloc[:, -1]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L472-L487
| 25 |
[
0,
1
] | 12.5 |
[
2,
3,
9,
11,
12,
13,
14,
15
] | 50 | false | 14.444444 | 16 | 2 | 50 | 1 |
def today_searches(self, pn="US"):
forms = {"ns": 15, "geo": pn, "tz": "-180", "hl": "en-US"}
req_json = self._get_data(
url=TrendReq.TODAY_SEARCHES_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=forms,
)["default"]["trendingSearchesDays"][0]["trendingSearches"]
result_df = pd.DataFrame()
# parse the returned json
sub_df = pd.DataFrame()
for trend in req_json:
sub_df = sub_df.append(trend["title"], ignore_index=True)
result_df = pd.concat([result_df, sub_df])
return result_df.iloc[:, -1]
| 18,313 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.top_charts
|
(self, date, hl="en-US", tz=300, geo="GLOBAL")
|
return df
|
Request data from Google's Top Charts section and return a dataframe
|
Request data from Google's Top Charts section and return a dataframe
| 489 | 508 |
def top_charts(self, date, hl="en-US", tz=300, geo="GLOBAL"):
"""Request data from Google's Top Charts section and return a dataframe"""
# create the payload
chart_payload = {
"hl": hl,
"tz": tz,
"date": date,
"geo": geo,
"isMobile": False,
}
# make the request and parse the returned json
req_json = self._get_data(
url=TrendReq.TOP_CHARTS_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=chart_payload,
)["topCharts"][0]["listItems"]
df = pd.DataFrame(req_json)
return df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L489-L508
| 25 |
[
0,
1,
2
] | 15 |
[
3,
12,
18,
19
] | 20 | false | 14.444444 | 20 | 1 | 80 | 1 |
def top_charts(self, date, hl="en-US", tz=300, geo="GLOBAL"):
# create the payload
chart_payload = {
"hl": hl,
"tz": tz,
"date": date,
"geo": geo,
"isMobile": False,
}
# make the request and parse the returned json
req_json = self._get_data(
url=TrendReq.TOP_CHARTS_URL,
method=TrendReq.GET_METHOD,
trim_chars=5,
params=chart_payload,
)["topCharts"][0]["listItems"]
df = pd.DataFrame(req_json)
return df
| 18,314 |
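top_charts only needs a date plus locale parameters and returns the raw listItems as a frame. A sketch; the yearly integer date (e.g. 2021) is an assumption about what the endpoint accepts, since the record does not show the expected format, so inspect the returned columns before relying on them.

charts_df = pytrends.top_charts(date=2021, hl="en-US", tz=300, geo="GLOBAL")
print(charts_df.columns.tolist())  # the listItems fields come back as-is
print(charts_df.head())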
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.suggestions
|
(self, keyword)
|
return req_json
|
Request data from Google's Keyword Suggestion dropdown and return a dictionary
|
Request data from Google's Keyword Suggestion dropdown and return a dictionary
| 510 | 523 |
def suggestions(self, keyword):
"""Request data from Google's Keyword Suggestion dropdown and return a dictionary"""
# make the request
kw_param = quote(keyword)
parameters = {"hl": self.hl}
req_json = self._get_data(
url=TrendReq.SUGGESTIONS_URL + kw_param,
params=parameters,
method=TrendReq.GET_METHOD,
trim_chars=5,
)["default"]["topics"]
return req_json
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L510-L523
| 25 |
[
0,
1,
2,
3
] | 28.571429 |
[
4,
5,
7,
13
] | 28.571429 | false | 14.444444 | 14 | 1 | 71.428571 | 1 |
def suggestions(self, keyword):
# make the request
kw_param = quote(keyword)
parameters = {"hl": self.hl}
req_json = self._get_data(
url=TrendReq.SUGGESTIONS_URL + kw_param,
params=parameters,
method=TrendReq.GET_METHOD,
trim_chars=5,
)["default"]["topics"]
return req_json
| 18,315 |
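suggestions URL-quotes the keyword and returns the raw topics list (plain dicts, not a dataframe), which is handy for resolving free text to a topic id before building a payload. A small sketch; the exact keys in each dict (commonly mid, title, type) are an assumption, so print them first:

hits = pytrends.suggestions("bitcoin")
for hit in hits[:5]:
    print(hit)  # inspect the available fields before picking one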
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.categories
|
(self)
|
return req_json
|
Request available categories data from Google's API and return a dictionary
|
Request available categories data from Google's API and return a dictionary
| 525 | 536 |
def categories(self):
"""Request available categories data from Google's API and return a dictionary"""
params = {"hl": self.hl}
req_json = self._get_data(
url=TrendReq.CATEGORIES_URL,
params=params,
method=TrendReq.GET_METHOD,
trim_chars=5,
)
return req_json
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L525-L536
| 25 |
[
0,
1,
2
] | 25 |
[
3,
5,
11
] | 25 | false | 14.444444 | 12 | 1 | 75 | 1 |
def categories(self):
params = {"hl": self.hl}
req_json = self._get_data(
url=TrendReq.CATEGORIES_URL,
params=params,
method=TrendReq.GET_METHOD,
trim_chars=5,
)
return req_json
| 18,316 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/request.py
|
TrendReq.get_historical_interest
|
(
self,
keywords,
year_start=2018,
month_start=1,
day_start=1,
hour_start=0,
year_end=2018,
month_end=2,
day_end=1,
hour_end=0,
cat=0,
geo="",
gprop="",
sleep=0,
)
|
return df.loc[initial_start_date:end_date]
|
Gets historical hourly data for interest by chunking requests to 1 week at a time (which is what Google allows)
|
Gets historical hourly data for interest by chunking requests to 1 week at a time (which is what Google allows)
| 538 | 611 |
def get_historical_interest(
self,
keywords,
year_start=2018,
month_start=1,
day_start=1,
hour_start=0,
year_end=2018,
month_end=2,
day_end=1,
hour_end=0,
cat=0,
geo="",
gprop="",
sleep=0,
):
"""Gets historical hourly data for interest by chunking requests to 1 week at a time (which is what Google allows)"""
        # construct datetime objects - raises ValueError if invalid parameters
initial_start_date = start_date = datetime(
year_start, month_start, day_start, hour_start
)
end_date = datetime(year_end, month_end, day_end, hour_end)
# the timeframe has to be in 1 week intervals or Google will reject it
delta = timedelta(days=7)
df = pd.DataFrame()
date_iterator = start_date
date_iterator += delta
while True:
# format date to comply with API call
start_date_str = start_date.strftime("%Y-%m-%dT%H")
date_iterator_str = date_iterator.strftime("%Y-%m-%dT%H")
tf = start_date_str + " " + date_iterator_str
try:
self.build_payload(keywords, cat, tf, geo, gprop)
week_df = self.interest_over_time()
df = df.append(week_df)
except Exception as e:
print(e)
pass
start_date += delta
date_iterator += delta
if date_iterator > end_date:
# Run for 7 more days to get remaining data that would have been truncated if we stopped now
# This is needed because google requires 7 days yet we may end up with a week result less than a full week
start_date_str = start_date.strftime("%Y-%m-%dT%H")
date_iterator_str = date_iterator.strftime("%Y-%m-%dT%H")
tf = start_date_str + " " + date_iterator_str
try:
self.build_payload(keywords, cat, tf, geo, gprop)
week_df = self.interest_over_time()
df = df.append(week_df)
except Exception as e:
print(e)
pass
break
# just in case you are rate-limited by Google. Recommended is 60 if you are.
if sleep > 0:
time.sleep(sleep)
# Return the dataframe with results from our timeframe
return df.loc[initial_start_date:end_date]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/request.py#L538-L611
| 25 |
[
0
] | 1.351351 |
[
19,
22,
25,
27,
29,
30,
32,
35,
36,
38,
40,
41,
42,
43,
44,
45,
46,
48,
49,
51,
54,
55,
57,
59,
60,
61,
62,
63,
64,
65,
66,
69,
70,
73
] | 45.945946 | false | 14.444444 | 74 | 6 | 54.054054 | 1 |
def get_historical_interest(
self,
keywords,
year_start=2018,
month_start=1,
day_start=1,
hour_start=0,
year_end=2018,
month_end=2,
day_end=1,
hour_end=0,
cat=0,
geo="",
gprop="",
sleep=0,
):
        # construct datetime objects - raises ValueError if invalid parameters
initial_start_date = start_date = datetime(
year_start, month_start, day_start, hour_start
)
end_date = datetime(year_end, month_end, day_end, hour_end)
# the timeframe has to be in 1 week intervals or Google will reject it
delta = timedelta(days=7)
df = pd.DataFrame()
date_iterator = start_date
date_iterator += delta
while True:
# format date to comply with API call
start_date_str = start_date.strftime("%Y-%m-%dT%H")
date_iterator_str = date_iterator.strftime("%Y-%m-%dT%H")
tf = start_date_str + " " + date_iterator_str
try:
self.build_payload(keywords, cat, tf, geo, gprop)
week_df = self.interest_over_time()
df = df.append(week_df)
except Exception as e:
print(e)
pass
start_date += delta
date_iterator += delta
if date_iterator > end_date:
# Run for 7 more days to get remaining data that would have been truncated if we stopped now
# This is needed because google requires 7 days yet we may end up with a week result less than a full week
start_date_str = start_date.strftime("%Y-%m-%dT%H")
date_iterator_str = date_iterator.strftime("%Y-%m-%dT%H")
tf = start_date_str + " " + date_iterator_str
try:
self.build_payload(keywords, cat, tf, geo, gprop)
week_df = self.interest_over_time()
df = df.append(week_df)
except Exception as e:
print(e)
pass
break
# just in case you are rate-limited by Google. Recommended is 60 if you are.
if sleep > 0:
time.sleep(sleep)
# Return the dataframe with results from our timeframe
return df.loc[initial_start_date:end_date]
| 18,317 |
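get_historical_interest stitches hourly data together in fixed 7-day chunks, appends one extra chunk past the end date, and finally slices back to the requested window, so each call can fire many requests; the sleep argument is the knob for rate limiting. A hedged sketch:

hourly = pytrends.get_historical_interest(
    ["bitcoin"],
    year_start=2022, month_start=1, day_start=1, hour_start=0,
    year_end=2022, month_end=1, day_end=15, hour_end=0,
    sleep=60,  # the source comment recommends 60 seconds when Google rate-limits you
)
if not hourly.empty:
    print(hourly.index.min(), hourly.index.max(), len(hourly))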
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_hist_sw
|
(symbol: str = "801030", period: str = "day")
|
return temp_df
|
申万宏源研究-指数发布-指数详情-指数历史数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:param period: choice of {"day", "week", "month"}
:type period: str
:return: 指数历史数据
:rtype: pandas.DataFrame
|
申万宏源研究-指数发布-指数详情-指数历史数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:param period: choice of {"day", "week", "month"}
:type period: str
:return: 指数历史数据
:rtype: pandas.DataFrame
| 15 | 67 |
def index_hist_sw(symbol: str = "801030", period: str = "day") -> pd.DataFrame:
"""
申万宏源研究-指数发布-指数详情-指数历史数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:param period: choice of {"day", "week", "month"}
:type period: str
:return: 指数历史数据
:rtype: pandas.DataFrame
"""
period_map = {
"day": "DAY",
"week": "WEEK",
"month": "MONTH",
}
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/trend/"
params = {
"swindexcode": symbol,
"period": period_map[period],
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"swindexcode": "代码",
"bargaindate": "日期",
"openindex": "开盘",
"maxindex": "最高",
"minindex": "最低",
"closeindex": "收盘",
"hike": "",
"markup": "",
"bargainamount": "成交量",
"bargainsum": "成交额",
},
inplace=True,
)
temp_df = temp_df[
[
"代码",
"日期",
"收盘",
"开盘",
"最高",
"最低",
"成交量",
"成交额",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L15-L67
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 20.754717 |
[
11,
16,
17,
21,
22,
23,
24,
39,
51,
52
] | 18.867925 | false | 7.865169 | 53 | 1 | 81.132075 | 8 |
def index_hist_sw(symbol: str = "801030", period: str = "day") -> pd.DataFrame:
period_map = {
"day": "DAY",
"week": "WEEK",
"month": "MONTH",
}
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/trend/"
params = {
"swindexcode": symbol,
"period": period_map[period],
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"swindexcode": "代码",
"bargaindate": "日期",
"openindex": "开盘",
"maxindex": "最高",
"minindex": "最低",
"closeindex": "收盘",
"hike": "",
"markup": "",
"bargainamount": "成交量",
"bargainsum": "成交额",
},
inplace=True,
)
temp_df = temp_df[
[
"代码",
"日期",
"收盘",
"开盘",
"最高",
"最低",
"成交量",
"成交额",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
return temp_df
| 18,318 |
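index_hist_sw maps period through period_map ("day"/"week"/"month"), renames the response to Chinese OHLCV column names and converts 日期 to date objects. A minimal call sketch, importing from the module path shown in the record (live access to swhyresearch.com assumed):

from akshare.index.index_sw_research import index_hist_sw

hist_df = index_hist_sw(symbol="801030", period="week")
print(hist_df[["日期", "收盘", "成交额"]].tail())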
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_min_sw
|
(symbol: str = "801001")
|
return temp_df
|
申万宏源研究-指数发布-指数详情-指数分时数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 指数分时数据
:rtype: pandas.DataFrame
|
申万宏源研究-指数发布-指数详情-指数分时数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 指数分时数据
:rtype: pandas.DataFrame
| 70 | 109 |
def index_min_sw(symbol: str = "801001") -> pd.DataFrame:
"""
申万宏源研究-指数发布-指数详情-指数分时数据
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 指数分时数据
:rtype: pandas.DataFrame
"""
url = (
"https://www.swhyresearch.com/institute-sw/api/index_publish/details/timelines/"
)
params = {
"swindexcode": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"l1": "代码",
"l2": "名称",
"l8": "价格",
"trading_date": "日期",
"trading_time": "时间",
},
inplace=True,
)
temp_df = temp_df[
[
"代码",
"名称",
"价格",
"日期",
"时间",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["价格"] = pd.to_numeric(temp_df["价格"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L70-L109
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 22.5 |
[
9,
12,
15,
16,
17,
18,
28,
37,
38,
39
] | 25 | false | 7.865169 | 40 | 1 | 75 | 6 |
def index_min_sw(symbol: str = "801001") -> pd.DataFrame:
url = (
"https://www.swhyresearch.com/institute-sw/api/index_publish/details/timelines/"
)
params = {
"swindexcode": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"l1": "代码",
"l2": "名称",
"l8": "价格",
"trading_date": "日期",
"trading_time": "时间",
},
inplace=True,
)
temp_df = temp_df[
[
"代码",
"名称",
"价格",
"日期",
"时间",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["价格"] = pd.to_numeric(temp_df["价格"])
return temp_df
| 18,319 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_component_sw
|
(symbol: str = "801001")
|
return temp_df
|
申万宏源研究-指数发布-指数详情-成分股
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 成分股
:rtype: pandas.DataFrame
|
申万宏源研究-指数发布-指数详情-成分股
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 成分股
:rtype: pandas.DataFrame
| 112 | 149 |
def index_component_sw(symbol: str = "801001") -> pd.DataFrame:
"""
申万宏源研究-指数发布-指数详情-成分股
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex/releasedetail?code=801001&name=%E7%94%B3%E4%B8%8750
:param symbol: 指数代码
:type symbol: str
:return: 成分股
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/details/component_stocks/"
params = {"swindexcode": symbol, "page": "1", "page_size": "10000"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.rename(
columns={
"index": "序号",
"stockcode": "证券代码",
"stockname": "证券名称",
"newweight": "最新权重",
"beginningdate": "计入日期",
},
inplace=True,
)
temp_df = temp_df[
[
"序号",
"证券代码",
"证券名称",
"最新权重",
"计入日期",
]
]
temp_df["计入日期"] = pd.to_datetime(temp_df["计入日期"]).dt.date
temp_df["最新权重"] = pd.to_numeric(temp_df["最新权重"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L112-L149
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 23.684211 |
[
9,
10,
11,
12,
13,
14,
15,
16,
26,
35,
36,
37
] | 31.578947 | false | 7.865169 | 38 | 1 | 68.421053 | 6 |
def index_component_sw(symbol: str = "801001") -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/details/component_stocks/"
params = {"swindexcode": symbol, "page": "1", "page_size": "10000"}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.rename(
columns={
"index": "序号",
"stockcode": "证券代码",
"stockname": "证券名称",
"newweight": "最新权重",
"beginningdate": "计入日期",
},
inplace=True,
)
temp_df = temp_df[
[
"序号",
"证券代码",
"证券名称",
"最新权重",
"计入日期",
]
]
temp_df["计入日期"] = pd.to_datetime(temp_df["计入日期"]).dt.date
temp_df["最新权重"] = pd.to_numeric(temp_df["最新权重"])
return temp_df
| 18,320 |
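index_component_sw fetches all constituents in one request (page_size=10000) and renumbers them from 1 in the 序号 column. A short sketch that also sanity-checks the weights:

from akshare.index.index_sw_research import index_component_sw

comp_df = index_component_sw(symbol="801001")
print(len(comp_df), "constituents, total 最新权重:", round(comp_df["最新权重"].sum(), 2))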
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_realtime_sw
|
(symbol: str = "二级行业") -> pd.D
|
return big_df
|
申万宏源研究-指数系列
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:return: 指数系列实时行情数据
:rtype: pandas.DataFrame
|
申万宏源研究-指数系列
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:return: 指数系列实时行情数据
:rtype: pandas.DataFrame
| 152 | 205 |
def index_realtime_sw(symbol: str = "二级行业") -> pd.DataFrame:
"""
申万宏源研究-指数系列
https://www.swhyresearch.com/institute_sw/allIndex/releasedIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:return: 指数系列实时行情数据
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/current/"
params = {"page": "1", "page_size": "50", "indextype": symbol}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
big_df = big_df[
[
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"最新价",
"成交额",
"成交量",
"最高价",
"最低价",
]
]
big_df["昨收盘"] = pd.to_numeric(big_df["昨收盘"])
big_df["今开盘"] = pd.to_numeric(big_df["今开盘"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["成交额"] = pd.to_numeric(big_df["成交额"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L152-L205
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.666667 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
33,
46,
47,
48,
49,
50,
51,
52,
53
] | 42.592593 | false | 7.865169 | 54 | 2 | 57.407407 | 6 |
def index_realtime_sw(symbol: str = "二级行业") -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_publish/current/"
params = {"page": "1", "page_size": "50", "indextype": symbol}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
big_df = big_df[
[
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"最新价",
"成交额",
"成交量",
"最高价",
"最低价",
]
]
big_df["昨收盘"] = pd.to_numeric(big_df["昨收盘"])
big_df["今开盘"] = pd.to_numeric(big_df["今开盘"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["成交额"] = pd.to_numeric(big_df["成交额"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
return big_df
| 18,321 |
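index_realtime_sw first reads the record count, then walks ceil(count/50) pages with tqdm and concatenates them; the analysis functions below reuse the same count-then-page pattern. A small sketch that derives a percentage-change column from the returned prices:

from akshare.index.index_sw_research import index_realtime_sw

rt_df = index_realtime_sw(symbol="一级行业")  # or "市场表征" / "二级行业" / "风格指数"
rt_df["涨跌幅"] = (rt_df["最新价"] / rt_df["昨收盘"] - 1) * 100
print(rt_df.sort_values("涨跌幅", ascending=False).head())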
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_analysis_daily_sw
|
(
symbol: str = "市场表征",
start_date: str = "20221103",
end_date: str = "20221103",
)
|
return big_df
|
申万宏源研究-指数分析
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 指数分析
:rtype: pandas.DataFrame
|
申万宏源研究-指数分析
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 指数分析
:rtype: pandas.DataFrame
| 208 | 279 |
def index_analysis_daily_sw(
symbol: str = "市场表征",
start_date: str = "20221103",
end_date: str = "20221103",
) -> pd.DataFrame:
"""
申万宏源研究-指数分析
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 指数分析
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_report/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"start_date": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"end_date": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"type": 'DAY',
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L208-L279
| 25 |
[
0
] | 1.388889 |
[
17,
18,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
70,
71
] | 38.888889 | false | 7.865169 | 72 | 2 | 61.111111 | 10 |
def index_analysis_daily_sw(
symbol: str = "市场表征",
start_date: str = "20221103",
end_date: str = "20221103",
) -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_report/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"start_date": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"end_date": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"type": 'DAY',
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
| 18,322 |
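index_analysis_daily_sw takes compact YYYYMMDD strings and rebuilds the dashed dates itself via "-".join([...]); the result is one row per index per trading day, already sorted by 发布日期. A call sketch over a short window:

from akshare.index.index_sw_research import index_analysis_daily_sw

daily_df = index_analysis_daily_sw(symbol="一级行业", start_date="20221101", end_date="20221103")
print(daily_df.groupby("发布日期")["指数代码"].count())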
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_analysis_week_month_sw
|
(symbol: str = "month")
|
return temp_df
|
申万宏源研究-周/月报表-日期序列
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"week", "month"}
:type symbol: str
:return: 日期序列
:rtype: pandas.DataFrame
|
申万宏源研究-周/月报表-日期序列
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"week", "month"}
:type symbol: str
:return: 日期序列
:rtype: pandas.DataFrame
| 282 | 301 |
def index_analysis_week_month_sw(symbol: str = "month") -> pd.DataFrame:
"""
申万宏源研究-周/月报表-日期序列
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"week", "month"}
:type symbol: str
:return: 日期序列
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/week_month_datetime/"
params = {
'type': symbol.upper()
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df['bargaindate'] = pd.to_datetime(temp_df['bargaindate']).dt.date
temp_df.columns = ['date']
temp_df.sort_values(['date'], inplace=True, ignore_index=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L282-L301
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 45 |
[
9,
10,
13,
14,
15,
16,
17,
18,
19
] | 45 | false | 7.865169 | 20 | 1 | 55 | 6 |
def index_analysis_week_month_sw(symbol: str = "month") -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/week_month_datetime/"
params = {
'type': symbol.upper()
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df['bargaindate'] = pd.to_datetime(temp_df['bargaindate']).dt.date
temp_df.columns = ['date']
temp_df.sort_values(['date'], inplace=True, ignore_index=True)
return temp_df
| 18,323 |
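index_analysis_week_month_sw returns only the valid report dates, which is exactly what the weekly and monthly report functions below expect in their date argument. A sketch chaining the two (the weekly function is defined in the next record):

from akshare.index.index_sw_research import (
    index_analysis_week_month_sw,
    index_analysis_weekly_sw,
)

dates_df = index_analysis_week_month_sw(symbol="week")
latest = dates_df["date"].iloc[-1].strftime("%Y%m%d")  # back to the compact form the report expects
weekly_df = index_analysis_weekly_sw(symbol="市场表征", date=latest)
print(latest, len(weekly_df))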
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_analysis_weekly_sw
|
(
symbol: str = "市场表征",
date: str = "20221104",
)
|
return big_df
|
申万宏源研究-指数分析-周报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw(date="20221104") 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
|
申万宏源研究-指数分析-周报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw(date="20221104") 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
| 304 | 371 |
def index_analysis_weekly_sw(
symbol: str = "市场表征",
date: str = "20221104",
) -> pd.DataFrame:
"""
申万宏源研究-指数分析-周报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw(date="20221104") 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_reports/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"bargaindate": "-".join([date[:4], date[4:6], date[6:]]),
"type": "WEEK",
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L304-L371
| 25 |
[
0
] | 1.470588 |
[
14,
15,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
66,
67
] | 41.176471 | false | 7.865169 | 68 | 2 | 58.823529 | 8 |
def index_analysis_weekly_sw(
symbol: str = "市场表征",
date: str = "20221104",
) -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_reports/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"bargaindate": "-".join([date[:4], date[4:6], date[6:]]),
"type": "WEEK",
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
| 18,324 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw_research.py
|
index_analysis_monthly_sw
|
(
symbol: str = "市场表征",
date: str = "20221031",
)
|
return big_df
|
申万宏源研究-指数分析-月报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw() 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
|
申万宏源研究-指数分析-月报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw() 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
| 374 | 441 |
def index_analysis_monthly_sw(
symbol: str = "市场表征",
date: str = "20221031",
) -> pd.DataFrame:
"""
申万宏源研究-指数分析-月报告
https://www.swhyresearch.com/institute_sw/allIndex/analysisIndex
:param symbol: choice of {"市场表征", "一级行业", "二级行业", "风格指数"}
:type symbol: str
:param date: 查询日期; 通过调用 ak.index_analysis_week_month_sw() 接口获取
:type date: str
:return: 指数分析
:rtype: pandas.DataFrame
"""
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_reports/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"bargaindate": "-".join([date[:4], date[4:6], date[6:]]),
"type": "MONTH",
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw_research.py#L374-L441
| 25 |
[
0
] | 1.470588 |
[
14,
15,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
66,
67
] | 41.176471 | false | 7.865169 | 68 | 2 | 58.823529 | 8 |
def index_analysis_monthly_sw(
symbol: str = "市场表征",
date: str = "20221031",
) -> pd.DataFrame:
url = "https://www.swhyresearch.com/institute-sw/api/index_analysis/index_analysis_reports/"
params = {
"page": "1",
"page_size": "50",
"index_type": symbol,
"bargaindate": "-".join([date[:4], date[4:6], date[6:]]),
"type": "MONTH",
"swindexcode": "all",
}
r = requests.get(url, params=params)
data_json = r.json()
total_num = data_json["data"]["count"]
total_page = math.ceil(total_num / 50)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["results"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
columns={
"swindexcode": "指数代码",
"swindexname": "指数名称",
"bargaindate": "发布日期",
"closeindex": "收盘指数",
"bargainamount": "成交量",
"markup": "涨跌幅",
"turnoverrate": "换手率",
"pe": "市盈率",
"pb": "市净率",
"meanprice": "均价",
"bargainsumrate": "成交额占比",
"negotiablessharesum1": "流通市值",
"negotiablessharesum2": "平均流通市值",
"dp": "股息率",
},
inplace=True,
)
big_df["发布日期"] = pd.to_datetime(big_df["发布日期"]).dt.date
big_df["收盘指数"] = pd.to_numeric(big_df["收盘指数"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"])
big_df["市净率"] = pd.to_numeric(big_df["市净率"])
big_df["均价"] = pd.to_numeric(big_df["均价"])
big_df["成交额占比"] = pd.to_numeric(big_df["成交额占比"])
big_df["流通市值"] = pd.to_numeric(big_df["流通市值"])
big_df["平均流通市值"] = pd.to_numeric(big_df["平均流通市值"])
big_df["股息率"] = pd.to_numeric(big_df["股息率"])
big_df.sort_values(['发布日期'], inplace=True, ignore_index=True)
return big_df
| 18,325 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/stock_zh_index_csindex.py
|
stock_zh_index_hist_csindex
|
(
symbol: str = "H30374",
start_date: str = "20160101",
end_date: str = "20211015",
)
|
return temp_df
|
中证指数-具体指数-历史行情数据
P.S. 只有收盘价,正常情况下不应使用该接口,除非指数只有中证网站有
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 包含日期和收盘价的指数数据
:rtype: pandas.DataFrame
|
中证指数-具体指数-历史行情数据
P.S. 只有收盘价,正常情况下不应使用该接口,除非指数只有中证网站有
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 包含日期和收盘价的指数数据
:rtype: pandas.DataFrame
| 14 | 69 |
def stock_zh_index_hist_csindex(
symbol: str = "H30374",
start_date: str = "20160101",
end_date: str = "20211015",
) -> pd.DataFrame:
"""
中证指数-具体指数-历史行情数据
P.S. 只有收盘价,正常情况下不应使用该接口,除非指数只有中证网站有
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 包含日期和收盘价的指数数据
:rtype: pandas.DataFrame
"""
url = "https://www.csindex.com.cn/csindex-home/perf/index-perf"
params = {
"indexCode": symbol,
"startDate": start_date,
"endDate": end_date,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["peg"]
temp_df.columns = [
"日期",
"指数代码",
"指数中文全称",
"指数中文简称",
"指数英文全称",
"指数英文简称",
"开盘",
"最高",
"最低",
"收盘",
"涨跌",
"涨跌幅",
"成交量",
"成交金额",
"样本数量",
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["涨跌"] = pd.to_numeric(temp_df["涨跌"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["样本数量"] = pd.to_numeric(temp_df["样本数量"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/stock_zh_index_csindex.py#L14-L69
| 25 |
[
0
] | 1.785714 |
[
18,
19,
24,
25,
26,
27,
28,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55
] | 32.142857 | false | 13.157895 | 56 | 1 | 67.857143 | 11 |
def stock_zh_index_hist_csindex(
symbol: str = "H30374",
start_date: str = "20160101",
end_date: str = "20211015",
) -> pd.DataFrame:
url = "https://www.csindex.com.cn/csindex-home/perf/index-perf"
params = {
"indexCode": symbol,
"startDate": start_date,
"endDate": end_date,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["peg"]
temp_df.columns = [
"日期",
"指数代码",
"指数中文全称",
"指数中文简称",
"指数英文全称",
"指数英文简称",
"开盘",
"最高",
"最低",
"收盘",
"涨跌",
"涨跌幅",
"成交量",
"成交金额",
"样本数量",
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["涨跌"] = pd.to_numeric(temp_df["涨跌"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["样本数量"] = pd.to_numeric(temp_df["样本数量"])
return temp_df
| 18,326 |
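A minimal usage sketch for stock_zh_index_hist_csindex as recorded above; it assumes network access to the csindex.com.cn endpoint and imports the function from the module path listed in this entry.

from akshare.index.stock_zh_index_csindex import stock_zh_index_hist_csindex

# Pull the H30374 history for the same date window used as defaults in the record
hist_df = stock_zh_index_hist_csindex(
    symbol="H30374", start_date="20160101", end_date="20211015"
)
# Columns are the renamed Chinese labels, e.g. "日期" and "收盘"
print(hist_df[["日期", "收盘"]].head())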
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/stock_zh_index_csindex.py
|
stock_zh_index_value_csindex
|
(symbol: str = "H30374")
|
return temp_df
|
中证指数-指数估值数据
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:return: 指数估值数据
:rtype: pandas.DataFrame
|
中证指数-指数估值数据
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:return: 指数估值数据
:rtype: pandas.DataFrame
| 72 | 100 |
def stock_zh_index_value_csindex(symbol: str = "H30374") -> pd.DataFrame:
"""
中证指数-指数估值数据
https://www.csindex.com.cn/zh-CN/indices/index-detail/H30374#/indices/family/detail?indexCode=H30374
:param symbol: 指数代码; e.g., H30374
:type symbol: str
:return: 指数估值数据
:rtype: pandas.DataFrame
"""
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/indicator/{symbol}indicator.xls"
temp_df = pd.read_excel(url)
temp_df.columns = [
"日期",
"指数代码",
"指数中文全称",
"指数中文简称",
"指数英文全称",
"指数英文简称",
"市盈率1",
"市盈率2",
"股息率1",
"股息率2",
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], format="%Y%m%d").dt.date
temp_df["市盈率1"] = pd.to_numeric(temp_df["市盈率1"])
temp_df["市盈率2"] = pd.to_numeric(temp_df["市盈率2"])
temp_df["股息率1"] = pd.to_numeric(temp_df["股息率1"])
temp_df["股息率2"] = pd.to_numeric(temp_df["股息率2"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/stock_zh_index_csindex.py#L72-L100
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 31.034483 |
[
9,
10,
11,
23,
24,
25,
26,
27,
28
] | 31.034483 | false | 13.157895 | 29 | 1 | 68.965517 | 6 |
def stock_zh_index_value_csindex(symbol: str = "H30374") -> pd.DataFrame:
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/indicator/{symbol}indicator.xls"
temp_df = pd.read_excel(url)
temp_df.columns = [
"日期",
"指数代码",
"指数中文全称",
"指数中文简称",
"指数英文全称",
"指数英文简称",
"市盈率1",
"市盈率2",
"股息率1",
"股息率2",
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"], format="%Y%m%d").dt.date
temp_df["市盈率1"] = pd.to_numeric(temp_df["市盈率1"])
temp_df["市盈率2"] = pd.to_numeric(temp_df["市盈率2"])
temp_df["股息率1"] = pd.to_numeric(temp_df["股息率1"])
temp_df["股息率2"] = pd.to_numeric(temp_df["股息率2"])
return temp_df
| 18,327 |
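A short call sketch for stock_zh_index_value_csindex above. One environment assumption not stated in the record: pandas.read_excel on an .xls file generally needs the xlrd engine installed.

from akshare.index.stock_zh_index_csindex import stock_zh_index_value_csindex

# Valuation history for the same index code; requires access to the aliyuncs.com file host
value_df = stock_zh_index_value_csindex(symbol="H30374")
# 市盈率1/市盈率2 and 股息率1/股息率2 are already numeric after the conversions above
print(value_df.sort_values("日期").tail())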
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/stock_zh_index_csindex.py
|
index_value_name_funddb
|
()
|
return temp_df
|
funddb-指数估值-指数代码
https://funddb.cn/site/index
:return: 指数代码
:rtype: pandas.DataFrame

|
funddb-指数估值-指数代码
https://funddb.cn/site/index
:return: 指数代码
:rtype: pandas.DataFrame
| 104 | 168 |
def index_value_name_funddb() -> pd.DataFrame:
"""
funddb-指数估值-指数代码
https://funddb.cn/site/index
    :return: 指数代码
    :rtype: pandas.DataFrame
"""
url = "https://api.jiucaishuo.com/v2/guzhi/showcategory"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["right_list"])
temp_df.columns = [
"指数开始时间",
"-",
"指数名称",
"指数代码",
"最新PE",
"最新PB",
"PE分位",
"PB分位",
"股息率",
"-",
"-",
"-",
"更新时间",
"股息率分位",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"指数名称",
"最新PE",
"PE分位",
"最新PB",
"PB分位",
"股息率",
"股息率分位",
"指数代码",
"指数开始时间",
"更新时间",
]
]
temp_df["指数开始时间"] = pd.to_datetime(temp_df["指数开始时间"]).dt.date
temp_df["最新PE"] = pd.to_numeric(temp_df["最新PE"])
temp_df["PE分位"] = pd.to_numeric(temp_df["PE分位"])
temp_df["最新PB"] = pd.to_numeric(temp_df["最新PB"])
temp_df["PB分位"] = pd.to_numeric(temp_df["PB分位"])
temp_df["股息率"] = pd.to_numeric(temp_df["股息率"])
temp_df["股息率分位"] = pd.to_numeric(temp_df["股息率分位"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/stock_zh_index_csindex.py#L104-L168
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 10.769231 |
[
7,
8,
9,
10,
11,
43,
57,
58,
59,
60,
61,
62,
63,
64
] | 21.538462 | false | 13.157895 | 65 | 1 | 78.461538 | 4 |
def index_value_name_funddb() -> pd.DataFrame:
url = "https://api.jiucaishuo.com/v2/guzhi/showcategory"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["right_list"])
temp_df.columns = [
"指数开始时间",
"-",
"指数名称",
"指数代码",
"最新PE",
"最新PB",
"PE分位",
"PB分位",
"股息率",
"-",
"-",
"-",
"更新时间",
"股息率分位",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"指数名称",
"最新PE",
"PE分位",
"最新PB",
"PB分位",
"股息率",
"股息率分位",
"指数代码",
"指数开始时间",
"更新时间",
]
]
temp_df["指数开始时间"] = pd.to_datetime(temp_df["指数开始时间"]).dt.date
temp_df["最新PE"] = pd.to_numeric(temp_df["最新PE"])
temp_df["PE分位"] = pd.to_numeric(temp_df["PE分位"])
temp_df["最新PB"] = pd.to_numeric(temp_df["最新PB"])
temp_df["PB分位"] = pd.to_numeric(temp_df["PB分位"])
temp_df["股息率"] = pd.to_numeric(temp_df["股息率"])
temp_df["股息率分位"] = pd.to_numeric(temp_df["股息率分位"])
return temp_df
| 18,328 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/stock_zh_index_csindex.py
|
index_value_hist_funddb
|
(
symbol: str = "大盘成长", indicator: str = "市盈率"
)
|
return big_df
|
funddb-指数估值-估值信息
https://funddb.cn/site/index
:param symbol: 指数名称; 通过调用 ak.index_value_name_funddb() 来获取
:type symbol: str
:param indicator: choice of {'市盈率', '市净率', '股息率'}
:type indicator: str
:return: 估值信息
:rtype: pandas.DataFrame
|
funddb-指数估值-估值信息
https://funddb.cn/site/index
:param symbol: 指数名称; 通过调用 ak.index_value_name_funddb() 来获取
:type symbol: str
:param indicator: choice of {'市盈率', '市净率', '股息率'}
:type indicator: str
:return: 估值信息
:rtype: pandas.DataFrame
| 171 | 230 |
def index_value_hist_funddb(
symbol: str = "大盘成长", indicator: str = "市盈率"
) -> pd.DataFrame:
"""
funddb-指数估值-估值信息
https://funddb.cn/site/index
:param symbol: 指数名称; 通过调用 ak.index_value_name_funddb() 来获取
:type symbol: str
:param indicator: choice of {'市盈率', '市净率', '股息率'}
:type indicator: str
:return: 估值信息
:rtype: pandas.DataFrame
"""
indicator_map = {
"市盈率": "pe",
"市净率": "pb",
"股息率": "xilv",
"风险溢价": "fed",
}
index_value_name_funddb_df = index_value_name_funddb()
name_code_map = dict(
zip(
index_value_name_funddb_df["指数名称"],
index_value_name_funddb_df["指数代码"],
)
)
url = "https://api.jiucaishuo.com/v2/guzhi/newtubiaolinedata"
payload = {
"gu_code": name_code_map[symbol],
"pe_category": indicator_map[indicator],
}
r = requests.post(url, json=payload)
data_json = r.json()
big_df = pd.DataFrame()
temp_df = pd.DataFrame(
data_json["data"]["tubiao"]["series"][0]["data"],
columns=["timestamp", "value"],
)
big_df["日期"] = (
pd.to_datetime(temp_df["timestamp"], unit="ms", utc=True)
.dt.tz_convert("Asia/Shanghai")
.dt.date
)
big_df["平均值"] = pd.to_numeric(temp_df["value"])
big_df[indicator] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][1]["data"]]
)
big_df["最低30"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][2]["data"]]
)
big_df["最低10"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][3]["data"]]
)
big_df["最高30"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][4]["data"]]
)
big_df["最高10"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][5]["data"]]
)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/stock_zh_index_csindex.py#L171-L230
| 25 |
[
0
] | 1.666667 |
[
13,
19,
20,
26,
27,
31,
32,
33,
34,
38,
43,
44,
47,
50,
53,
56,
59
] | 28.333333 | false | 13.157895 | 60 | 6 | 71.666667 | 8 |
def index_value_hist_funddb(
symbol: str = "大盘成长", indicator: str = "市盈率"
) -> pd.DataFrame:
indicator_map = {
"市盈率": "pe",
"市净率": "pb",
"股息率": "xilv",
"风险溢价": "fed",
}
index_value_name_funddb_df = index_value_name_funddb()
name_code_map = dict(
zip(
index_value_name_funddb_df["指数名称"],
index_value_name_funddb_df["指数代码"],
)
)
url = "https://api.jiucaishuo.com/v2/guzhi/newtubiaolinedata"
payload = {
"gu_code": name_code_map[symbol],
"pe_category": indicator_map[indicator],
}
r = requests.post(url, json=payload)
data_json = r.json()
big_df = pd.DataFrame()
temp_df = pd.DataFrame(
data_json["data"]["tubiao"]["series"][0]["data"],
columns=["timestamp", "value"],
)
big_df["日期"] = (
pd.to_datetime(temp_df["timestamp"], unit="ms", utc=True)
.dt.tz_convert("Asia/Shanghai")
.dt.date
)
big_df["平均值"] = pd.to_numeric(temp_df["value"])
big_df[indicator] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][1]["data"]]
)
big_df["最低30"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][2]["data"]]
)
big_df["最低10"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][3]["data"]]
)
big_df["最高30"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][4]["data"]]
)
big_df["最高10"] = pd.to_numeric(
[item[1] for item in data_json["data"]["tubiao"]["series"][5]["data"]]
)
return big_df
| 18,329 |
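A sketch chaining the two funddb helpers documented above: index_value_name_funddb lists the valid 指数名称 values, and index_value_hist_funddb maps a chosen name to its gu_code internally before requesting the valuation series. Assumes the api.jiucaishuo.com endpoint is reachable.

from akshare.index.stock_zh_index_csindex import (
    index_value_name_funddb,
    index_value_hist_funddb,
)

names_df = index_value_name_funddb()
print(names_df[["指数名称", "指数代码", "最新PE", "PE分位"]].head())

# Any 指数名称 from the table is a valid symbol for the history helper
pe_df = index_value_hist_funddb(symbol="大盘成长", indicator="市盈率")
print(pe_df[["日期", "市盈率", "平均值", "最高30", "最低30"]].head())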
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_hog.py
|
index_hog_spot_price
|
()
|
return temp_df
|
行情宝-生猪市场价格指数
http://hqb.nxin.com/pigindex/index.shtml
:return: 生猪市场价格指数
:rtype: pandas.DataFrame
|
行情宝-生猪市场价格指数
http://hqb.nxin.com/pigindex/index.shtml
:return: 生猪市场价格指数
:rtype: pandas.DataFrame
| 12 | 44 |
def index_hog_spot_price() -> pd.DataFrame:
"""
行情宝-生猪市场价格指数
http://hqb.nxin.com/pigindex/index.shtml
:return: 生猪市场价格指数
:rtype: pandas.DataFrame
"""
url = "http://hqb.nxin.com/pigindex/getPigIndexChart.shtml"
params = {
'regionId': '0'
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df.columns = [
'日期',
'指数',
'4个月均线',
'6个月均线',
'12个月均线',
'预售均价',
'成交均价',
'成交均重',
]
temp_df['日期'] = (pd.to_datetime(temp_df['日期'], unit="ms") + pd.Timedelta(hours=8)).dt.date
temp_df['指数'] = pd.to_numeric(temp_df['指数'], errors="coerce")
temp_df['4个月均线'] = pd.to_numeric(temp_df['4个月均线'], errors="coerce")
temp_df['6个月均线'] = pd.to_numeric(temp_df['6个月均线'], errors="coerce")
temp_df['12个月均线'] = pd.to_numeric(temp_df['12个月均线'], errors="coerce")
temp_df['预售均价'] = pd.to_numeric(temp_df['预售均价'], errors="coerce")
temp_df['成交均价'] = pd.to_numeric(temp_df['成交均价'], errors="coerce")
temp_df['成交均重'] = pd.to_numeric(temp_df['成交均重'], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_hog.py#L12-L44
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 21.212121 |
[
7,
8,
11,
12,
13,
14,
24,
25,
26,
27,
28,
29,
30,
31,
32
] | 45.454545 | false | 22.727273 | 33 | 1 | 54.545455 | 4 |
def index_hog_spot_price() -> pd.DataFrame:
url = "http://hqb.nxin.com/pigindex/getPigIndexChart.shtml"
params = {
'regionId': '0'
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df.columns = [
'日期',
'指数',
'4个月均线',
'6个月均线',
'12个月均线',
'预售均价',
'成交均价',
'成交均重',
]
temp_df['日期'] = (pd.to_datetime(temp_df['日期'], unit="ms") + pd.Timedelta(hours=8)).dt.date
temp_df['指数'] = pd.to_numeric(temp_df['指数'], errors="coerce")
temp_df['4个月均线'] = pd.to_numeric(temp_df['4个月均线'], errors="coerce")
temp_df['6个月均线'] = pd.to_numeric(temp_df['6个月均线'], errors="coerce")
temp_df['12个月均线'] = pd.to_numeric(temp_df['12个月均线'], errors="coerce")
temp_df['预售均价'] = pd.to_numeric(temp_df['预售均价'], errors="coerce")
temp_df['成交均价'] = pd.to_numeric(temp_df['成交均价'], errors="coerce")
temp_df['成交均重'] = pd.to_numeric(temp_df['成交均重'], errors="coerce")
return temp_df
| 18,330 |
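A minimal sketch for index_hog_spot_price above; comparing the latest index value with its own 4个月均线 column is illustrative and not part of the API.

from akshare.index.index_hog import index_hog_spot_price

hog_df = index_hog_spot_price()
latest = hog_df.sort_values("日期").iloc[-1]
# Both columns come straight from the function's renamed output
print(latest["指数"], latest["4个月均线"])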
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_yw.py
|
index_yw
|
(symbol: str = "月景气指数")
|
义乌小商品指数
http://www.ywindex.com/Home/Product/index/
:param symbol: choice of {"周价格指数", "月价格指数", "月景气指数"}
:type symbol: str
:return: 指数结果
:rtype: pandas.DataFrame
|
义乌小商品指数
http://www.ywindex.com/Home/Product/index/
:param symbol: choice of {"周价格指数", "月价格指数", "月景气指数"}
:type symbol: str
:return: 指数结果
:rtype: pandas.DataFrame
| 13 | 72 |
def index_yw(symbol: str = "月景气指数") -> pd.DataFrame:
"""
义乌小商品指数
http://www.ywindex.com/Home/Product/index/
:param symbol: choice of {"周价格指数", "月价格指数", "月景气指数"}
:type symbol: str
:return: 指数结果
:rtype: pandas.DataFrame
"""
name_num_dict = {
"周价格指数": 1,
"月价格指数": 3,
"月景气指数": 5,
}
url = "http://www.ywindex.com/Home/Product/index/"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
table_name = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[2]
.split("\n")
)
table_content = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[3]
.split("\n\n")
)
if symbol == "月景气指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[
:, :5
]
table_df.columns = ['期数', '景气指数', '规模指数', '效益指数', '市场信心指数']
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['景气指数'] = pd.to_numeric(table_df['景气指数'])
table_df['规模指数'] = pd.to_numeric(table_df['规模指数'])
table_df['效益指数'] = pd.to_numeric(table_df['效益指数'])
table_df['市场信心指数'] = pd.to_numeric(table_df['市场信心指数'])
return table_df
elif symbol == "周价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
elif symbol == "月价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_yw.py#L13-L72
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 15 |
[
9,
14,
15,
16,
17,
23,
29,
30,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59
] | 58.333333 | false | 12.765957 | 60 | 7 | 41.666667 | 6 |
def index_yw(symbol: str = "月景气指数") -> pd.DataFrame:
name_num_dict = {
"周价格指数": 1,
"月价格指数": 3,
"月景气指数": 5,
}
url = "http://www.ywindex.com/Home/Product/index/"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
table_name = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[2]
.split("\n")
)
table_content = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[3]
.split("\n\n")
)
if symbol == "月景气指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[
:, :5
]
table_df.columns = ['期数', '景气指数', '规模指数', '效益指数', '市场信心指数']
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['景气指数'] = pd.to_numeric(table_df['景气指数'])
table_df['规模指数'] = pd.to_numeric(table_df['规模指数'])
table_df['效益指数'] = pd.to_numeric(table_df['效益指数'])
table_df['市场信心指数'] = pd.to_numeric(table_df['市场信心指数'])
return table_df
elif symbol == "周价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
elif symbol == "月价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
| 18,331 |
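A sketch iterating over the three symbol values accepted by index_yw above; all three variants share the 期数 column, while the remaining columns differ between the 价格 and 景气 tables.

from akshare.index.index_yw import index_yw

for kind in ["周价格指数", "月价格指数", "月景气指数"]:
    df = index_yw(symbol=kind)
    # Print the most recent period in each table
    print(kind, df["期数"].max())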
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_kq_ss.py
|
index_kq_fashion
|
(symbol: str = "时尚创意指数")
|
return temp_df
|
柯桥时尚指数
http://ss.kqindex.cn:9559/rinder_web_kqsszs/index/index_page.do
:param symbol: choice of {'柯桥时尚指数', '时尚创意指数', '时尚设计人才数', '新花型推出数', '创意产品成交数', '创意企业数量', '时尚活跃度指数', '电商运行数', '时尚平台拓展数', '新产品销售额占比', '企业合作占比', '品牌传播费用', '时尚推广度指数', '国际交流合作次数', '企业参展次数', '外商驻点数量变化', '时尚评价指数'}
:type symbol: str
:return: 柯桥时尚指数及其子项数据
:rtype: pandas.DataFrame
|
柯桥时尚指数
http://ss.kqindex.cn:9559/rinder_web_kqsszs/index/index_page.do
:param symbol: choice of {'柯桥时尚指数', '时尚创意指数', '时尚设计人才数', '新花型推出数', '创意产品成交数', '创意企业数量', '时尚活跃度指数', '电商运行数', '时尚平台拓展数', '新产品销售额占比', '企业合作占比', '品牌传播费用', '时尚推广度指数', '国际交流合作次数', '企业参展次数', '外商驻点数量变化', '时尚评价指数'}
:type symbol: str
:return: 柯桥时尚指数及其子项数据
:rtype: pandas.DataFrame
| 12 | 71 |
def index_kq_fashion(symbol: str = "时尚创意指数") -> pd.DataFrame:
"""
柯桥时尚指数
http://ss.kqindex.cn:9559/rinder_web_kqsszs/index/index_page.do
:param symbol: choice of {'柯桥时尚指数', '时尚创意指数', '时尚设计人才数', '新花型推出数', '创意产品成交数', '创意企业数量', '时尚活跃度指数', '电商运行数', '时尚平台拓展数', '新产品销售额占比', '企业合作占比', '品牌传播费用', '时尚推广度指数', '国际交流合作次数', '企业参展次数', '外商驻点数量变化', '时尚评价指数'}
:type symbol: str
:return: 柯桥时尚指数及其子项数据
:rtype: pandas.DataFrame
"""
url = "http://api.idx365.com/index/project/34/data"
symbol_map = {
"柯桥时尚指数": "root",
"时尚创意指数": "01",
"时尚设计人才数": "0101",
"新花型推出数": "0102",
"创意产品成交数": "0103",
"创意企业数量": "0104",
"时尚活跃度指数": "02",
"电商运行数": "0201",
"时尚平台拓展数": "0201",
"新产品销售额占比": "0201",
"企业合作占比": "0201",
"品牌传播费用": "0201",
"时尚推广度指数": "03",
"国际交流合作次数": "0301",
"企业参展次数": "0302",
"外商驻点数量变化": "0302",
"时尚评价指数": "04",
}
params = {"structCode": symbol_map[symbol]}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"id": "_",
"indexValue": "指数",
"lastValue": "_",
"projId": "_",
"publishTime": "日期",
"sameValue": "_",
"stageId": "_",
"structCode": "_",
"structName": "_",
"version": "_",
},
inplace=True,
)
temp_df = temp_df[
[
"日期",
"指数",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values("日期", inplace=True)
temp_df["涨跌值"] = temp_df["指数"].diff()
temp_df["涨跌幅"] = temp_df["指数"].pct_change()
temp_df.sort_values("日期", ascending=False, inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_kq_ss.py#L12-L71
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 15 |
[
9,
10,
29,
30,
31,
32,
33,
48,
54,
55,
56,
57,
58,
59
] | 23.333333 | false | 22.727273 | 60 | 1 | 76.666667 | 6 |
def index_kq_fashion(symbol: str = "时尚创意指数") -> pd.DataFrame:
url = "http://api.idx365.com/index/project/34/data"
symbol_map = {
"柯桥时尚指数": "root",
"时尚创意指数": "01",
"时尚设计人才数": "0101",
"新花型推出数": "0102",
"创意产品成交数": "0103",
"创意企业数量": "0104",
"时尚活跃度指数": "02",
"电商运行数": "0201",
"时尚平台拓展数": "0201",
"新产品销售额占比": "0201",
"企业合作占比": "0201",
"品牌传播费用": "0201",
"时尚推广度指数": "03",
"国际交流合作次数": "0301",
"企业参展次数": "0302",
"外商驻点数量变化": "0302",
"时尚评价指数": "04",
}
params = {"structCode": symbol_map[symbol]}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df.rename(
columns={
"id": "_",
"indexValue": "指数",
"lastValue": "_",
"projId": "_",
"publishTime": "日期",
"sameValue": "_",
"stageId": "_",
"structCode": "_",
"structName": "_",
"version": "_",
},
inplace=True,
)
temp_df = temp_df[
[
"日期",
"指数",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values("日期", inplace=True)
temp_df["涨跌值"] = temp_df["指数"].diff()
temp_df["涨跌幅"] = temp_df["指数"].pct_change()
temp_df.sort_values("日期", ascending=False, inplace=True)
return temp_df
| 18,332 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_representation_spot
|
()
|
return temp_df
|
申万-市场表征实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: 市场表征实时行情数据
:rtype: pandas.DataFrame
|
申万-市场表征实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: 市场表征实时行情数据
:rtype: pandas.DataFrame
| 20 | 59 |
def sw_index_representation_spot() -> pd.DataFrame:
"""
申万-市场表征实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: 市场表征实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L20-L59
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 17.5 |
[
7,
8,
18,
19,
20,
21,
32,
33,
34,
35,
36,
37,
38,
39
] | 35 | false | 7.006369 | 40 | 1 | 65 | 4 |
def sw_index_representation_spot() -> pd.DataFrame:
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
| 18,333 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_spot
|
()
|
return temp_df
|
申万一级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: 申万一级行业实时行情数据
:rtype: pandas.DataFrame
|
申万一级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: 申万一级行业实时行情数据
:rtype: pandas.DataFrame
| 62 | 100 |
def sw_index_spot() -> pd.DataFrame:
"""
申万一级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: 申万一级行业实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L62-L100
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 17.948718 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
31,
32,
33,
34,
35,
36,
37,
38
] | 56.410256 | false | 7.006369 | 39 | 2 | 43.589744 | 4 |
def sw_index_spot() -> pd.DataFrame:
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
| 18,334 |
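A usage sketch for the Shenwan level-1 spot helper above; ranking by 成交额 is only an example of working with the numeric columns the function returns.

from akshare.index.index_sw import sw_index_spot

spot_df = sw_index_spot()
top_df = spot_df.sort_values("成交额", ascending=False)
print(top_df[["指数代码", "指数名称", "最新价", "成交额"]].head())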
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_second_spot
|
()
|
return temp_df
|
申万二级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: 申万二级行业-实时行情数据
:rtype: pandas.DataFrame
|
申万二级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: 申万二级行业-实时行情数据
:rtype: pandas.DataFrame
| 103 | 149 |
def sw_index_second_spot() -> pd.DataFrame:
"""
申万二级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: 申万二级行业-实时行情数据
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 8):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801012','801014','801015','801016','801017','801018','801032','801033','801034','801036','801037','801038','801039','801043','801044','801045','801051','801053','801054','801055','801056','801072','801074','801076','801077','801078','801081','801082','801083','801084','801085','801086','801092','801093','801095','801096','801101','801102','801103','801104','801111','801112','801113','801114','801115','801116','801124','801125','801126','801127','801128','801129','801131','801132','801133','801141','801142','801143','801145','801151','801152','801153','801154','801155','801156','801161','801163','801178','801179','801181','801183','801191','801193','801194','801202','801203','801204','801206','801218','801219','801223','801231','801711','801712','801713','801721','801722','801723','801724','801726','801731','801733','801735','801736','801737','801738','801741','801742','801743','801744','801745','801764','801765','801766','801767','801769','801782','801783','801784','801785','801881','801951','801952','801962','801963','801971','801972','801981','801982','801991','801992','801993','801994','801995')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "124",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L103-L149
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14.893617 |
[
7,
8,
9,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
39,
40,
41,
42,
43,
44,
45,
46
] | 44.680851 | false | 7.006369 | 47 | 2 | 55.319149 | 4 |
def sw_index_second_spot() -> pd.DataFrame:
result = []
for i in range(1, 8):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801012','801014','801015','801016','801017','801018','801032','801033','801034','801036','801037','801038','801039','801043','801044','801045','801051','801053','801054','801055','801056','801072','801074','801076','801077','801078','801081','801082','801083','801084','801085','801086','801092','801093','801095','801096','801101','801102','801103','801104','801111','801112','801113','801114','801115','801116','801124','801125','801126','801127','801128','801129','801131','801132','801133','801141','801142','801143','801145','801151','801152','801153','801154','801155','801156','801161','801163','801178','801179','801181','801183','801191','801193','801194','801202','801203','801204','801206','801218','801219','801223','801231','801711','801712','801713','801721','801722','801723','801724','801726','801731','801733','801735','801736','801737','801738','801741','801742','801743','801744','801745','801764','801765','801766','801767','801769','801782','801783','801784','801785','801881','801951','801952','801962','801963','801971','801972','801981','801982','801991','801992','801993','801994','801995')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "124",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = [
"指数代码",
"指数名称",
"昨收盘",
"今开盘",
"成交额",
"最高价",
"最低价",
"最新价",
"成交量",
]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
| 18,335 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_cons
|
(symbol: str = "801011")
|
return temp_df
|
申万指数成份信息-包括一级和二级行业都可以查询
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: 指数代码
:type symbol: str
:return: 申万指数成份信息
:rtype: pandas.DataFrame
|
申万指数成份信息-包括一级和二级行业都可以查询
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: 指数代码
:type symbol: str
:return: 申万指数成份信息
:rtype: pandas.DataFrame
| 152 | 186 |
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
"""
申万指数成份信息-包括一级和二级行业都可以查询
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: 指数代码
:type symbol: str
:return: 申万指数成份信息
:rtype: pandas.DataFrame
"""
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L152-L186
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 25.714286 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
23,
31,
32,
33,
34
] | 51.428571 | false | 7.006369 | 35 | 3 | 48.571429 | 6 |
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
| 18,336 |
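A sketch pulling the constituents of one Shenwan index with sw_index_cons above; summing the weight column as a sanity check is illustrative only.

from akshare.index.index_sw import sw_index_cons

cons_df = sw_index_cons(symbol="801011")
print(cons_df.head())
print("total weight:", round(cons_df["weight"].sum(), 2))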
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_daily
|
(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
)
|
return temp_df
|
申万指数一级和二级日频率行情数据
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 申万指数日频率行情数据
:rtype: pandas.DataFrame
|
申万指数一级和二级日频率行情数据
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 申万指数日频率行情数据
:rtype: pandas.DataFrame
| 189 | 254 |
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
"""
申万指数一级和二级日频率行情数据
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 申万指数日频率行情数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L189-L254
| 25 |
[
0
] | 1.515152 |
[
17,
18,
19,
20,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65
] | 50 | false | 7.006369 | 66 | 3 | 50 | 10 |
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
| 18,337 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_daily_indicator
|
(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
)
|
return temp_df
|
申万一级和二级行业历史行情指标
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param data_type: choice of {"Day": 日报表, "Week": 周报表}
:type data_type: str
:return: 申万指数不同频率数据
:rtype: pandas.DataFrame
|
申万一级和二级行业历史行情指标
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param data_type: choice of {"Day": 日报表, "Week": 周报表}
:type data_type: str
:return: 申万指数不同频率数据
:rtype: pandas.DataFrame
| 257 | 348 |
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
"""
申万一级和二级行业历史行情指标
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param data_type: choice of {"Day": 日报表, "Week": 周报表}
:type data_type: str
:return: 申万指数不同频率数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(
temp_df["dividend_yield_ratio"]
)
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L257-L348
| 25 |
[
0
] | 1.086957 |
[
20,
21,
22,
23,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
80,
83,
86,
87,
90,
91
] | 47.826087 | false | 7.006369 | 92 | 3 | 52.173913 | 12 |
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(
temp_df["dividend_yield_ratio"]
)
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
| 18,338 |
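A closing sketch combining the two Shenwan history helpers above; the Week value for data_type comes from the docstring's own choice list, and the date window mirrors the defaults in the record.

from akshare.index.index_sw import sw_index_daily, sw_index_daily_indicator

daily_df = sw_index_daily(symbol="801011", start_date="20191201", end_date="20201207")
weekly_df = sw_index_daily_indicator(
    symbol="801011",
    start_date="20191201",
    end_date="20201207",
    data_type="Week",  # "Day" would return the daily report instead
)
print(daily_df[["date", "close", "change_pct"]].head())
print(weekly_df[["date", "close", "pe", "pb", "dividend_yield_ratio"]].head())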