Dataset schema (column name, dtype, observed min/max of value or string length):

| column | dtype | min | max |
|---|---|---|---|
| nwo | string (length) | 10 | 28 |
| sha | string (length) | 40 | 40 |
| path | string (length) | 11 | 97 |
| identifier | string (length) | 1 | 64 |
| parameters | string (length) | 2 | 2.24k |
| return_statement | string (length) | 0 | 2.17k |
| docstring | string (length) | 0 | 5.45k |
| docstring_summary | string (length) | 0 | 3.83k |
| func_begin | int64 | 1 | 13.4k |
| func_end | int64 | 2 | 13.4k |
| function | string (length) | 28 | 56.4k |
| url | string (length) | 106 | 209 |
| project | int64 | 1 | 48 |
| executed_lines | list | – | – |
| executed_lines_pc | float64 | 0 | 153 |
| missing_lines | list | – | – |
| missing_lines_pc | float64 | 0 | 100 |
| covered | bool (2 classes) | – | – |
| filecoverage | float64 | 2.53 | 100 |
| function_lines | int64 | 2 | 1.46k |
| mccabe | int64 | 1 | 253 |
| coverage | float64 | 0 | 100 |
| docstring_lines | int64 | 0 | 112 |
| function_nodoc | string (length) | 9 | 56.4k |
| id | int64 | 0 | 29.8k |

The records below list these fields in this order, separated by `|` markers.
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_daily
|
(date: str = "20201018")
|
return temp_df
|
电影票房-单日票房
https://www.endata.com.cn/BoxOffice/BO/Day/index.html
:param date: 只能设置当前日期的前一天的票房数据
:type date: str
:return: 每日票房
:rtype: pandas.DataFrame
|
电影票房-单日票房
https://www.endata.com.cn/BoxOffice/BO/Day/index.html
:param date: 只能设置当前日期的前一天的票房数据
:type date: str
:return: 每日票房
:rtype: pandas.DataFrame
| 101 | 144 |
def movie_boxoffice_daily(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-单日票房
https://www.endata.com.cn/BoxOffice/BO/Day/index.html
:param date: 只能设置当前日期的前一天的票房数据
:type date: str
:return: 每日票房
:rtype: pandas.DataFrame
"""
last_date = datetime.datetime.strptime(date, "%Y%m%d") - datetime.timedelta(days=1)
last_date = last_date.strftime("%Y%m%d")
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"sdate": f"{date[:4]}-{date[4:6]}-{date[6:]}",
"edate": f"{last_date[:4]}-{last_date[4:6]}-{last_date[6:]}",
"MethodName": "BoxOffice_GetDayBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"_",
"累计票房",
"平均票价",
"上映天数",
"场均人次",
"_",
"_",
"_",
"_",
"_",
"单日票房",
"环比变化",
"_",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "单日票房", "环比变化", "累计票房", "平均票价", "场均人次", "口碑指数", "上映天数"]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L101-L144
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 20.454545 |
[
9,
10,
11,
12,
17,
18,
19,
20,
21,
40,
43
] | 25 | false | 15.037594 | 44 | 1 | 75 | 6 |
def movie_boxoffice_daily(date: str = "20201018") -> pd.DataFrame:
last_date = datetime.datetime.strptime(date, "%Y%m%d") - datetime.timedelta(days=1)
last_date = last_date.strftime("%Y%m%d")
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"sdate": f"{date[:4]}-{date[4:6]}-{date[6:]}",
"edate": f"{last_date[:4]}-{last_date[4:6]}-{last_date[6:]}",
"MethodName": "BoxOffice_GetDayBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"_",
"累计票房",
"平均票价",
"上映天数",
"场均人次",
"_",
"_",
"_",
"_",
"_",
"单日票房",
"环比变化",
"_",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "单日票房", "环比变化", "累计票房", "平均票价", "场均人次", "口碑指数", "上映天数"]
]
return temp_df
| 18,741 |
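A minimal usage sketch for movie_boxoffice_daily above (not part of the source record), assuming akshare is installed and the endata.com.cn endpoint is reachable; the import path follows the file path shown in this record:

from akshare.movie.movie_yien import movie_boxoffice_daily

daily_df = movie_boxoffice_daily(date="20201018")
# Columns per the selection above: 排序, 影片名称, 单日票房, 环比变化, 累计票房, ...
print(daily_df[["排序", "影片名称", "单日票房"]].head())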
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_weekly
|
(date: str = "20201018")
|
return temp_df
|
电影票房-单周票房
https://www.endata.com.cn/BoxOffice/BO/Week/oneWeek.html
:param date: 只能获取指定日期所在完整周的票房数据
:type date: str
:return: 单周票房
:rtype: pandas.DataFrame
|
电影票房-单周票房
https://www.endata.com.cn/BoxOffice/BO/Week/oneWeek.html
:param date: 只能获取指定日期所在完整周的票房数据
:type date: str
:return: 单周票房
:rtype: pandas.DataFrame
| 147 | 184 |
def movie_boxoffice_weekly(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-单周票房
https://www.endata.com.cn/BoxOffice/BO/Week/oneWeek.html
:param date: 只能获取指定日期所在完整周的票房数据
:type date: str
:return: 单周票房
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"sdate": get_current_week(date=date).strftime("%Y-%m-%d"),
"MethodName": "BoxOffice_GetWeekInfoData",
}
r = requests.post(url, data=payload)
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"单周票房",
"累计票房",
"_",
"上映天数",
"平均票价",
"场均人次",
"环比变化",
"_",
"_",
"_",
"排名变化",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "排名变化", "单周票房", "环比变化", "累计票房", "平均票价", "场均人次", "口碑指数", "上映天数"]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L147-L184
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 23.684211 |
[
9,
10,
14,
15,
16,
17,
34,
37
] | 21.052632 | false | 15.037594 | 38 | 1 | 78.947368 | 6 |
def movie_boxoffice_weekly(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"sdate": get_current_week(date=date).strftime("%Y-%m-%d"),
"MethodName": "BoxOffice_GetWeekInfoData",
}
r = requests.post(url, data=payload)
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"单周票房",
"累计票房",
"_",
"上映天数",
"平均票价",
"场均人次",
"环比变化",
"_",
"_",
"_",
"排名变化",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "排名变化", "单周票房", "环比变化", "累计票房", "平均票价", "场均人次", "口碑指数", "上映天数"]
]
return temp_df
| 18,742 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_monthly
|
(date: str = "20201018")
|
return temp_df
|
电影票房-单月票房
https://www.endata.com.cn/BoxOffice/BO/Month/oneMonth.html
:param date: 指定日期所在月份的月度票房
:type date: str
:return: 单月票房
:rtype: pandas.DataFrame
|
电影票房-单月票房
https://www.endata.com.cn/BoxOffice/BO/Month/oneMonth.html
:param date: 指定日期所在月份的月度票房
:type date: str
:return: 单月票房
:rtype: pandas.DataFrame
| 187 | 221 |
def movie_boxoffice_monthly(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-单月票房
https://www.endata.com.cn/BoxOffice/BO/Month/oneMonth.html
:param date: 指定日期所在月份的月度票房
:type date: str
:return: 单月票房
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"startTime": f"{date[:4]}-{date[4:6]}-01",
"MethodName": "BoxOffice_GetMonthBox",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"月内天数",
"单月票房",
"平均票价",
"场均人次",
"月度占比",
"上映日期",
"_",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "单月票房", "月度占比", "平均票价", "场均人次", "上映日期", "口碑指数", "月内天数"]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L187-L221
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 25.714286 |
[
9,
10,
14,
15,
16,
17,
18,
31,
34
] | 25.714286 | false | 15.037594 | 35 | 1 | 74.285714 | 6 |
def movie_boxoffice_monthly(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"startTime": f"{date[:4]}-{date[4:6]}-01",
"MethodName": "BoxOffice_GetMonthBox",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影片名称",
"月内天数",
"单月票房",
"平均票价",
"场均人次",
"月度占比",
"上映日期",
"_",
"口碑指数",
]
temp_df = temp_df[
["排序", "影片名称", "单月票房", "月度占比", "平均票价", "场均人次", "上映日期", "口碑指数", "月内天数"]
]
return temp_df
| 18,743 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_yearly
|
(date: str = "20201018")
|
return temp_df
|
电影票房-年度票房
https://www.endata.com.cn/BoxOffice/BO/Year/index.html
:param date: 当前日期所在年度的票房数据
:type date: str
:return: 年度票房
:rtype: pandas.DataFrame
|
电影票房-年度票房
https://www.endata.com.cn/BoxOffice/BO/Year/index.html
:param date: 当前日期所在年度的票房数据
:type date: str
:return: 年度票房
:rtype: pandas.DataFrame
| 224 | 257 |
def movie_boxoffice_yearly(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-年度票房
https://www.endata.com.cn/BoxOffice/BO/Year/index.html
:param date: 当前日期所在年度的票房数据
:type date: str
:return: 年度票房
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"year": f"{date[:4]}",
"MethodName": "BoxOffice_GetYearInfoData",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.reset_index(inplace=True)
temp_df.columns = [
"排序",
"_",
"影片名称",
"类型",
"总票房",
"平均票价",
"场均人次",
"国家及地区",
"上映日期",
"_",
]
temp_df["排序"] = range(1, len(temp_df) + 1)
temp_df = temp_df[["排序", "影片名称", "类型", "总票房", "平均票价", "场均人次", "国家及地区", "上映日期"]]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L224-L257
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 26.470588 |
[
9,
10,
14,
15,
16,
17,
18,
19,
31,
32,
33
] | 32.352941 | false | 15.037594 | 34 | 1 | 67.647059 | 6 |
def movie_boxoffice_yearly(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"year": f"{date[:4]}",
"MethodName": "BoxOffice_GetYearInfoData",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.reset_index(inplace=True)
temp_df.columns = [
"排序",
"_",
"影片名称",
"类型",
"总票房",
"平均票价",
"场均人次",
"国家及地区",
"上映日期",
"_",
]
temp_df["排序"] = range(1, len(temp_df) + 1)
temp_df = temp_df[["排序", "影片名称", "类型", "总票房", "平均票价", "场均人次", "国家及地区", "上映日期"]]
return temp_df
| 18,744 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_yearly_first_week
|
(date: str = "20201018")
|
return temp_df
|
电影票房-年度票房-年度首周票房
https://www.endata.com.cn/BoxOffice/BO/Year/firstWeek.html
:param date: 当前日期所在年度的年度首周票房数据
:type date: str
:return: 年度首周票房
:rtype: pandas.DataFrame
|
电影票房-年度票房-年度首周票房
https://www.endata.com.cn/BoxOffice/BO/Year/firstWeek.html
:param date: 当前日期所在年度的年度首周票房数据
:type date: str
:return: 年度首周票房
:rtype: pandas.DataFrame
| 260 | 297 |
def movie_boxoffice_yearly_first_week(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-年度票房-年度首周票房
https://www.endata.com.cn/BoxOffice/BO/Year/firstWeek.html
    :param date: 当前日期所在年度的年度首周票房数据
:type date: str
:return: 年度首周票房
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"year": f"{date[:4]}",
"MethodName": "BoxOffice_getYearInfo_fData",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.reset_index(inplace=True)
temp_df.columns = [
"排序",
"_",
"_",
"影片名称",
"首周票房",
"场均人次",
"上映日期",
"首周天数",
"类型",
"国家及地区",
"_",
"占总票房比重",
]
temp_df["排序"] = range(1, len(temp_df) + 1)
temp_df = temp_df[
["排序", "影片名称", "类型", "首周票房", "占总票房比重", "场均人次", "国家及地区", "上映日期", "首周天数"]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L260-L297
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 23.684211 |
[
9,
10,
14,
15,
16,
17,
18,
19,
33,
34,
37
] | 28.947368 | false | 15.037594 | 38 | 1 | 71.052632 | 6 |
def movie_boxoffice_yearly_first_week(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"year": f"{date[:4]}",
"MethodName": "BoxOffice_getYearInfo_fData",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.reset_index(inplace=True)
temp_df.columns = [
"排序",
"_",
"_",
"影片名称",
"首周票房",
"场均人次",
"上映日期",
"首周天数",
"类型",
"国家及地区",
"_",
"占总票房比重",
]
temp_df["排序"] = range(1, len(temp_df) + 1)
temp_df = temp_df[
["排序", "影片名称", "类型", "首周票房", "占总票房比重", "场均人次", "国家及地区", "上映日期", "首周天数"]
]
return temp_df
| 18,745 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_cinema_daily
|
(date: str = "20201018")
|
return temp_df
|
电影票房-影院票房-日票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/day.html
:param date: 当前日期前一日的票房数据
:type date: str
:return: 电影票房-影院票房-日票房排行
:rtype: pandas.DataFrame
|
电影票房-影院票房-日票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/day.html
:param date: 当前日期前一日的票房数据
:type date: str
:return: 电影票房-影院票房-日票房排行
:rtype: pandas.DataFrame
| 300 | 333 |
def movie_boxoffice_cinema_daily(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-影院票房-日票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/day.html
:param date: 当前日期前一日的票房数据
:type date: str
    :return: 电影票房-影院票房-日票房排行
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"rowNum1": "1",
"rowNum2": "100",
"date": date,
"MethodName": "BoxOffice_GetCinemaDayBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影院名称",
"单日票房",
"单日场次",
"_",
"_",
"场均票价",
"场均人次",
"上座率",
]
temp_df = temp_df[["排序", "影院名称", "单日票房", "单日场次", "场均人次", "场均票价", "上座率"]]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L300-L333
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 26.470588 |
[
9,
10,
16,
17,
18,
19,
20,
32,
33
] | 26.470588 | false | 15.037594 | 34 | 1 | 73.529412 | 6 |
def movie_boxoffice_cinema_daily(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"rowNum1": "1",
"rowNum2": "100",
"date": date,
"MethodName": "BoxOffice_GetCinemaDayBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影院名称",
"单日票房",
"单日场次",
"_",
"_",
"场均票价",
"场均人次",
"上座率",
]
temp_df = temp_df[["排序", "影院名称", "单日票房", "单日场次", "场均人次", "场均票价", "上座率"]]
return temp_df
| 18,746 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/movie_yien.py
|
movie_boxoffice_cinema_weekly
|
(date: str = "20201018")
|
return temp_df
|
电影票房-影院票房-周票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/week.html
:param date: 当前日期前完整一周的票房数据
:type date: str
:return: 电影票房-影院票房-周票房排行
:rtype: pandas.DataFrame
|
电影票房-影院票房-周票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/week.html
:param date: 当前日期前完整一周的票房数据
:type date: str
:return: 电影票房-影院票房-周票房排行
:rtype: pandas.DataFrame
| 336 | 375 |
def movie_boxoffice_cinema_weekly(date: str = "20201018") -> pd.DataFrame:
"""
电影票房-影院票房-周票房排行
https://www.endata.com.cn/BoxOffice/BO/Cinema/week.html
:param date: 当前日期前完整一周的票房数据
:type date: str
    :return: 电影票房-影院票房-周票房排行
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"dateID": str(
datetime.date.fromisoformat(
f"{date[:4]}-{date[4:6]}-{date[6:]}"
).isocalendar()[1]
- 1
- 41
+ 1128
),
"rowNum1": "1",
"rowNum2": "100",
"MethodName": "BoxOffice_GetCinemaWeekBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影院名称",
"当周票房",
"_",
"单银幕票房",
"场均人次",
"单日单厅票房",
"单日单厅场次",
]
temp_df = temp_df[["排序", "影院名称", "当周票房", "单银幕票房", "场均人次", "单日单厅票房", "单日单厅场次"]]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/movie_yien.py#L336-L375
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 22.5 |
[
9,
10,
23,
24,
25,
26,
27,
38,
39
] | 22.5 | false | 15.037594 | 40 | 1 | 77.5 | 6 |
def movie_boxoffice_cinema_weekly(date: str = "20201018") -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {
"dateID": str(
datetime.date.fromisoformat(
f"{date[:4]}-{date[4:6]}-{date[6:]}"
).isocalendar()[1]
- 1
- 41
+ 1128
),
"rowNum1": "1",
"rowNum2": "100",
"MethodName": "BoxOffice_GetCinemaWeekBoxOffice",
}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
temp_df.columns = [
"排序",
"_",
"影院名称",
"当周票房",
"_",
"单银幕票房",
"场均人次",
"单日单厅票房",
"单日单厅场次",
]
temp_df = temp_df[["排序", "影院名称", "当周票房", "单银幕票房", "场均人次", "单日单厅票房", "单日单厅场次"]]
return temp_df
| 18,747 |
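The dateID value sent by movie_boxoffice_cinema_weekly above is derived from the ISO week number of the query date; the offset is an inference from the payload expression, not documented endata API behaviour. A small worked sketch:

import datetime

date = "20201018"
iso_week = datetime.date.fromisoformat(
    f"{date[:4]}-{date[4:6]}-{date[6:]}"
).isocalendar()[1]                    # ISO week 42 for 2020-10-18
date_id = iso_week - 1 - 41 + 1128    # 42 - 1 - 41 + 1128 = 1128
print(iso_week, date_id)              # 42 1128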
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/video_yien.py
|
_get_js_path
|
(name: str = "", module_file: str = "")
|
return module_json_path
|
get JS file path
:param name: file name
:type name: str
:param module_file: filename
:type module_file: str
:return: 路径
:rtype: str
|
get JS file path
:param name: file name
:type name: str
:param module_file: filename
:type module_file: str
:return: 路径
:rtype: str
| 19 | 31 |
def _get_js_path(name: str = "", module_file: str = "") -> str:
"""
get JS file path
:param name: file name
:type name: str
:param module_file: filename
:type module_file: str
:return: 路径
:rtype: str
"""
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "movie", name)
return module_json_path
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/video_yien.py#L19-L31
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 76.923077 |
[
10,
11,
12
] | 23.076923 | false | 23.529412 | 13 | 1 | 76.923077 | 7 |
def _get_js_path(name: str = "", module_file: str = "") -> str:
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "movie", name)
return module_json_path
| 18,748 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/video_yien.py
|
_get_file_content
|
(file_name: str = "jm.js")
|
return file_data
|
read the file content
:param file_name: filename
:type file_name: str
:return: file content
:rtype: str
|
read the file content
:param file_name: filename
:type file_name: str
:return: file content
:rtype: str
| 34 | 46 |
def _get_file_content(file_name: str = "jm.js"):
"""
read the file content
:param file_name: filename
:type file_name: str
:return: file content
:rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/video_yien.py#L34-L46
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 61.538462 |
[
8,
9,
10,
11,
12
] | 38.461538 | false | 23.529412 | 13 | 2 | 61.538462 | 5 |
def _get_file_content(file_name: str = "jm.js"):
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
| 18,749 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/video_yien.py
|
decrypt
|
(origin_data: str = "")
|
return data
|
解密艺恩的加密数据
:param origin_data: 解密前的字符串
:type origin_data: str
:return: 解密后的字符串
:rtype: str
|
解密艺恩的加密数据
:param origin_data: 解密前的字符串
:type origin_data: str
:return: 解密后的字符串
:rtype: str
| 49 | 61 |
def decrypt(origin_data: str = "") -> str:
"""
解密艺恩的加密数据
:param origin_data: 解密前的字符串
:type origin_data: str
:return: 解密后的字符串
:rtype: str
"""
file_data = _get_file_content(file_name="jm.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
data = ctx.call("webInstace.shell", origin_data)
return data
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/video_yien.py#L49-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 61.538462 |
[
8,
9,
10,
11,
12
] | 38.461538 | false | 23.529412 | 13 | 1 | 61.538462 | 5 |
def decrypt(origin_data: str = "") -> str:
file_data = _get_file_content(file_name="jm.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
data = ctx.call("webInstace.shell", origin_data)
return data
| 18,750 |
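The decrypt helper above wraps the bundled jm.js in a py_mini_racer context. A minimal sketch of the same eval/call pattern with a stand-in JavaScript function (the real jm.js ships inside the package; the shout function here is only illustrative):

from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
ctx.eval("function shout(s) { return s.toUpperCase(); }")  # stand-in for jm.js
print(ctx.call("shout", "payload"))  # PAYLOAD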
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/video_yien.py
|
video_tv
|
()
|
return temp_df
|
艺恩-视频放映-电视剧集
https://www.endata.com.cn/Video/index.html
:return: 电视剧集
:rtype: pandas.DataFrame
|
艺恩-视频放映-电视剧集
https://www.endata.com.cn/Video/index.html
:return: 电视剧集
:rtype: pandas.DataFrame
| 64 | 81 |
def video_tv() -> pd.DataFrame:
"""
艺恩-视频放映-电视剧集
https://www.endata.com.cn/Video/index.html
:return: 电视剧集
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {"tvType": 2, "MethodName": "BoxOffice_GetTvData_PlayIndexRank"}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
report_date = data_json["Data"]["Table1"][0]["MaxDate"]
temp_df.columns = ["排序", "名称", "类型", "播映指数", "用户热度", "媒体热度", "观看度", "好评度"]
temp_df = temp_df[["排序", "名称", "类型", "播映指数", "媒体热度", "用户热度", "好评度", "观看度"]]
temp_df["统计日期"] = report_date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/video_yien.py#L64-L81
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 38.888889 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 61.111111 | false | 23.529412 | 18 | 1 | 38.888889 | 4 |
def video_tv() -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {"tvType": 2, "MethodName": "BoxOffice_GetTvData_PlayIndexRank"}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
report_date = data_json["Data"]["Table1"][0]["MaxDate"]
temp_df.columns = ["排序", "名称", "类型", "播映指数", "用户热度", "媒体热度", "观看度", "好评度"]
temp_df = temp_df[["排序", "名称", "类型", "播映指数", "媒体热度", "用户热度", "好评度", "观看度"]]
temp_df["统计日期"] = report_date
return temp_df
| 18,751 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/movie/video_yien.py
|
video_variety_show
|
()
|
return temp_df
|
艺恩-视频放映-综艺节目
https://www.endata.com.cn/Video/index.html
:return: 综艺节目
:rtype: pandas.DataFrame
|
艺恩-视频放映-综艺节目
https://www.endata.com.cn/Video/index.html
:return: 综艺节目
:rtype: pandas.DataFrame
| 84 | 101 |
def video_variety_show() -> pd.DataFrame:
"""
艺恩-视频放映-综艺节目
https://www.endata.com.cn/Video/index.html
:return: 综艺节目
:rtype: pandas.DataFrame
"""
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {"tvType": 8, "MethodName": "BoxOffice_GetTvData_PlayIndexRank"}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
report_date = data_json["Data"]["Table1"][0]["MaxDate"]
temp_df.columns = ["排序", "名称", "类型", "播映指数", "用户热度", "媒体热度", "观看度", "好评度"]
temp_df = temp_df[["排序", "名称", "类型", "播映指数", "媒体热度", "用户热度", "好评度", "观看度"]]
temp_df["统计日期"] = report_date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/movie/video_yien.py#L84-L101
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 38.888889 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 61.111111 | false | 23.529412 | 18 | 1 | 38.888889 | 4 |
def video_variety_show() -> pd.DataFrame:
url = "https://www.endata.com.cn/API/GetData.ashx"
payload = {"tvType": 8, "MethodName": "BoxOffice_GetTvData_PlayIndexRank"}
r = requests.post(url, data=payload)
r.encoding = "utf8"
data_json = json.loads(decrypt(r.text))
temp_df = pd.DataFrame(data_json["Data"]["Table"])
report_date = data_json["Data"]["Table1"][0]["MaxDate"]
temp_df.columns = ["排序", "名称", "类型", "播映指数", "用户热度", "媒体热度", "观看度", "好评度"]
temp_df = temp_df[["排序", "名称", "类型", "播映指数", "媒体热度", "用户热度", "好评度", "观看度"]]
temp_df["统计日期"] = report_date
return temp_df
| 18,752 |
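A minimal usage sketch for the two play-index functions above (video_tv and video_variety_show differ only in the tvType payload), assuming akshare is installed and the endata endpoint is reachable:

from akshare.movie.video_yien import video_tv, video_variety_show

tv_df = video_tv()
show_df = video_variety_show()
print(tv_df[["排序", "名称", "播映指数", "统计日期"]].head())
print(show_df[["排序", "名称", "播映指数", "统计日期"]].head())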
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/tool/trade_date_hist.py
|
tool_trade_date_hist_sina
|
()
|
return temp_df
|
交易日历-历史数据
https://finance.sina.com.cn/realstock/company/klc_td_sh.txt
:return: 交易日历
:rtype: pandas.DataFrame
|
交易日历-历史数据
https://finance.sina.com.cn/realstock/company/klc_td_sh.txt
:return: 交易日历
:rtype: pandas.DataFrame
| 18 | 39 |
def tool_trade_date_hist_sina() -> pd.DataFrame:
"""
交易日历-历史数据
https://finance.sina.com.cn/realstock/company/klc_td_sh.txt
:return: 交易日历
:rtype: pandas.DataFrame
"""
url = "https://finance.sina.com.cn/realstock/company/klc_td_sh.txt"
r = requests.get(url)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", r.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df.columns = ["trade_date"]
temp_df["trade_date"] = pd.to_datetime(temp_df["trade_date"]).dt.date
temp_list = temp_df["trade_date"].to_list()
temp_list.append(datetime.date(1992, 5, 4)) # 是交易日但是交易日历缺失该日期
temp_list.sort()
temp_df = pd.DataFrame(temp_list, columns=["trade_date"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/tool/trade_date_hist.py#L18-L39
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 31.818182 |
[
7,
8,
9,
10,
11,
14,
15,
16,
17,
18,
19,
20,
21
] | 59.090909 | false | 34.782609 | 22 | 1 | 40.909091 | 4 |
def tool_trade_date_hist_sina() -> pd.DataFrame:
url = "https://finance.sina.com.cn/realstock/company/klc_td_sh.txt"
r = requests.get(url)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", r.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
temp_df = pd.DataFrame(dict_list)
temp_df.columns = ["trade_date"]
temp_df["trade_date"] = pd.to_datetime(temp_df["trade_date"]).dt.date
temp_list = temp_df["trade_date"].to_list()
temp_list.append(datetime.date(1992, 5, 4)) # 是交易日但是交易日历缺失该日期
temp_list.sort()
temp_df = pd.DataFrame(temp_list, columns=["trade_date"])
return temp_df
| 18,753 |
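A minimal usage sketch for tool_trade_date_hist_sina above; 1992-05-04 is guaranteed to appear because the function appends it explicitly to patch the missing calendar entry:

import datetime

from akshare.tool.trade_date_hist import tool_trade_date_hist_sina

calendar_df = tool_trade_date_hist_sina()
trade_dates = set(calendar_df["trade_date"])
print(datetime.date(1992, 5, 4) in trade_dates)  # True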
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/cost/cost_living.py
|
_get_region
|
()
|
return name_url_dict
|
获取主要板块, 一般不调用
:return: 主要板块
:rtype: dict
|
获取主要板块, 一般不调用
:return: 主要板块
:rtype: dict
| 13 | 32 |
def _get_region() -> dict:
"""
获取主要板块, 一般不调用
:return: 主要板块
:rtype: dict
"""
url = "https://www.expatistan.com/cost-of-living/index"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
half_url_list = [
item["href"]
for item in soup.find("ul", attrs={"class": "regions"}).find_all("a")
]
name_list = [
item["href"].split("/")[-1]
for item in soup.find("ul", attrs={"class": "regions"}).find_all("a")
]
name_url_dict = dict(zip(name_list, half_url_list))
name_url_dict["world"] = "/cost-of-living/index"
return name_url_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/cost/cost_living.py#L13-L32
| 25 |
[
0,
1,
2,
3,
4,
5
] | 30 |
[
6,
7,
8,
9,
13,
17,
18,
19
] | 40 | false | 56.521739 | 20 | 3 | 60 | 3 |
def _get_region() -> dict:
url = "https://www.expatistan.com/cost-of-living/index"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
half_url_list = [
item["href"]
for item in soup.find("ul", attrs={"class": "regions"}).find_all("a")
]
name_list = [
item["href"].split("/")[-1]
for item in soup.find("ul", attrs={"class": "regions"}).find_all("a")
]
name_url_dict = dict(zip(name_list, half_url_list))
name_url_dict["world"] = "/cost-of-living/index"
return name_url_dict
| 18,754 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/cost/cost_living.py
|
cost_living
|
(region: str = "world")
|
return temp_df
|
国家或地区生活成本数据
https://expatistan.com/cost-of-living/index
:param region: choice of {"europe", "north-america", "latin-america", "asia", "middle-east", "africa", "oceania", "world"}
:type region: str
:return: 国家或地区生活成本数据
:rtype: pandas.DataFrame
|
国家或地区生活成本数据
https://expatistan.com/cost-of-living/index
:param region: choice of {"europe", "north-america", "latin-america", "asia", "middle-east", "africa", "oceania", "world"}
:type region: str
:return: 国家或地区生活成本数据
:rtype: pandas.DataFrame
| 35 | 58 |
def cost_living(region: str = "world") -> pd.DataFrame:
"""
国家或地区生活成本数据
https://expatistan.com/cost-of-living/index
:param region: choice of {"europe", "north-america", "latin-america", "asia", "middle-east", "africa", "oceania", "world"}
:type region: str
:return: 国家或地区生活成本数据
:rtype: pandas.DataFrame
"""
name_url_map = {
"europe": "/cost-of-living/index/europe",
"north-america": "/cost-of-living/index/north-america",
"latin-america": "/cost-of-living/index/latin-america",
"asia": "/cost-of-living/index/asia",
"middle-east": "/cost-of-living/index/middle-east",
"africa": "/cost-of-living/index/africa",
"oceania": "/cost-of-living/index/oceania",
"world": "/cost-of-living/index",
}
url = f"https://www.expatistan.com{name_url_map[region]}"
r = requests.get(url)
temp_df = pd.read_html(r.text)[0]
temp_df.columns = ["rank", "city", "index"]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/cost/cost_living.py#L35-L58
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23
] | 100 |
[] | 0 | true | 56.521739 | 24 | 1 | 100 | 6 |
def cost_living(region: str = "world") -> pd.DataFrame:
name_url_map = {
"europe": "/cost-of-living/index/europe",
"north-america": "/cost-of-living/index/north-america",
"latin-america": "/cost-of-living/index/latin-america",
"asia": "/cost-of-living/index/asia",
"middle-east": "/cost-of-living/index/middle-east",
"africa": "/cost-of-living/index/africa",
"oceania": "/cost-of-living/index/oceania",
"world": "/cost-of-living/index",
}
url = f"https://www.expatistan.com{name_url_map[region]}"
r = requests.get(url)
temp_df = pd.read_html(r.text)[0]
temp_df.columns = ["rank", "city", "index"]
return temp_df
| 18,755 |
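A minimal usage sketch for cost_living above, assuming akshare is installed and lxml is available for pandas.read_html:

from akshare.cost.cost_living import cost_living

asia_df = cost_living(region="asia")
print(asia_df.head())  # columns: rank, city, index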
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_cctv.py
|
news_cctv
|
(date: str = "20130308")
|
新闻联播文字稿
https://tv.cctv.com/lm/xwlb/?spm=C52056131267.P4y8I53JvSWE.0.0
:param date: 需要获取数据的日期; 目前 20160203 年后
:type date: str
:return: 新闻联播文字稿
:rtype: pandas.DataFrame
|
新闻联播文字稿
https://tv.cctv.com/lm/xwlb/?spm=C52056131267.P4y8I53JvSWE.0.0
:param date: 需要获取数据的日期; 目前 20160203 年后
:type date: str
:return: 新闻联播文字稿
:rtype: pandas.DataFrame
| 16 | 176 |
def news_cctv(date: str = "20130308") -> pd.DataFrame:
"""
新闻联播文字稿
https://tv.cctv.com/lm/xwlb/?spm=C52056131267.P4y8I53JvSWE.0.0
:param date: 需要获取数据的日期; 目前 20160203 年后
:type date: str
:return: 新闻联播文字稿
:rtype: pandas.DataFrame
"""
if int(date) <= int("20130708"):
url = f"http://cctv.cntv.cn/lm/xinwenlianbo/{date}.shtml"
r = requests.get(url)
r.encoding = "gbk"
raw_list = re.findall(r"title_array_01\((.*)", r.text)
page_url = [
re.findall("(http.*)", item)[0].split("'")[0]
for item in raw_list[1:]
]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
title = soup.find("h3").text
content = soup.find("div", attrs={"class": "cnt_bd"}).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
elif int(date) < int("20160203"):
url = f"http://cctv.cntv.cn/lm/xinwenlianbo/{date}.shtml"
r = requests.get(url)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
page_url = [
item.find("a")["href"]
for item in soup.find(
"div", attrs={"id": "contentELMT1368521805488378"}
).find_all("li")[1:]
]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
title = soup.find("h3").text
content = soup.find("div", attrs={"class": "cnt_bd"}).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
elif int(date) > int("20160203"):
url = f"https://tv.cctv.com/lm/xwlb/day/{date}.shtml"
r = requests.get(url)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
page_url = [item.find("a")["href"] for item in soup.find_all("li")[1:]]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
if soup.find("h3"):
title = soup.find("h3").text
else:
title = soup.find("div", attrs={"class": "tit"}).text
if soup.find("div", attrs={"class": "cnt_bd"}):
content = soup.find("div", attrs={"class": "cnt_bd"}).text
else:
content = soup.find(
"div", attrs={"class": "content_area"}
).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_cctv.py#L16-L176
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 5.590062 |
[
9,
10,
11,
12,
13,
14,
18,
19,
20,
32,
33,
34,
35,
36,
37,
38,
39,
42,
50,
51,
52,
56,
58,
59,
60,
61,
62,
63,
69,
70,
71,
83,
84,
85,
86,
87,
88,
89,
90,
93,
101,
102,
103,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
128,
129,
130,
131,
132,
133,
134,
136,
137,
138,
140,
143,
146,
154,
155,
156,
160
] | 43.478261 | false | 10 | 161 | 15 | 56.521739 | 6 |
def news_cctv(date: str = "20130308") -> pd.DataFrame:
if int(date) <= int("20130708"):
url = f"http://cctv.cntv.cn/lm/xinwenlianbo/{date}.shtml"
r = requests.get(url)
r.encoding = "gbk"
raw_list = re.findall(r"title_array_01\((.*)", r.text)
page_url = [
re.findall("(http.*)", item)[0].split("'")[0]
for item in raw_list[1:]
]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
title = soup.find("h3").text
content = soup.find("div", attrs={"class": "cnt_bd"}).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
elif int(date) < int("20160203"):
url = f"http://cctv.cntv.cn/lm/xinwenlianbo/{date}.shtml"
r = requests.get(url)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
page_url = [
item.find("a")["href"]
for item in soup.find(
"div", attrs={"id": "contentELMT1368521805488378"}
).find_all("li")[1:]
]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
title = soup.find("h3").text
content = soup.find("div", attrs={"class": "cnt_bd"}).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
elif int(date) > int("20160203"):
url = f"https://tv.cctv.com/lm/xwlb/day/{date}.shtml"
r = requests.get(url)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
page_url = [item.find("a")["href"] for item in soup.find_all("li")[1:]]
title_list = []
content_list = []
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Cookie": "cna=DLYSGBDthG4CAbRVCNxSxGT6",
"Host": "tv.cctv.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
}
for page in tqdm(page_url, leave=False):
try:
r = requests.get(page, headers=headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "lxml")
if soup.find("h3"):
title = soup.find("h3").text
else:
title = soup.find("div", attrs={"class": "tit"}).text
if soup.find("div", attrs={"class": "cnt_bd"}):
content = soup.find("div", attrs={"class": "cnt_bd"}).text
else:
content = soup.find(
"div", attrs={"class": "content_area"}
).text
title_list.append(
title.strip("[视频]").strip().replace("\n", " ")
)
content_list.append(
content.strip()
.strip("央视网消息(新闻联播):")
.strip("央视网消息(新闻联播):")
.strip("(新闻联播):")
.strip()
.replace("\n", " ")
)
except:
continue
temp_df = pd.DataFrame(
[[date] * len(title_list), title_list, content_list],
index=["date", "title", "content"],
).T
return temp_df
| 18,756 |
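A minimal usage sketch for news_cctv above; dates after 20160203 take the tv.cctv.com branch, and the call needs network access plus lxml for BeautifulSoup:

from akshare.news.news_cctv import news_cctv

cctv_df = news_cctv(date="20220512")
print(cctv_df[["date", "title"]].head())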
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_stock.py
|
stock_news_em
|
(symbol: str = "601628")
|
return temp_df
|
东方财富-个股新闻-最近 100 条新闻
https://so.eastmoney.com/news/s?keyword=%E4%B8%AD%E5%9B%BD%E4%BA%BA%E5%AF%BF&pageindex=1&searchrange=8192&sortfiled=4
:param symbol: 股票代码
:type symbol: str
:return: 个股新闻
:rtype: pandas.DataFrame
|
东方财富-个股新闻-最近 100 条新闻
https://so.eastmoney.com/news/s?keyword=%E4%B8%AD%E5%9B%BD%E4%BA%BA%E5%AF%BF&pageindex=1&searchrange=8192&sortfiled=4
:param symbol: 股票代码
:type symbol: str
:return: 个股新闻
:rtype: pandas.DataFrame
| 14 | 82 |
def stock_news_em(symbol: str = "601628") -> pd.DataFrame:
"""
东方财富-个股新闻-最近 100 条新闻
https://so.eastmoney.com/news/s?keyword=%E4%B8%AD%E5%9B%BD%E4%BA%BA%E5%AF%BF&pageindex=1&searchrange=8192&sortfiled=4
:param symbol: 股票代码
:type symbol: str
:return: 个股新闻
:rtype: pandas.DataFrame
"""
url = "https://search-api-web.eastmoney.com/search/jsonp"
params = {
"cb": "jQuery3510875346244069884_1668256937995",
"param": '{"uid":"",'
+ f'"keyword":"{symbol}"'
+ ',"type":["cmsArticleWebOld"],"client":"web","clientType":"web","clientVersion":"curr","param":{"cmsArticleWebOld":{"searchScope":"default","sort":"default","pageIndex":1,"pageSize":100,"preTag":"<em>","postTag":"</em>"}}}',
"_": "1668256937996",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(
data_text.strip("jQuery3510875346244069884_1668256937995(")[:-1]
)
temp_df = pd.DataFrame(data_json["result"]["cmsArticleWebOld"])
temp_df.rename(
columns={
"date": "发布时间",
"mediaName": "文章来源",
"code": "-",
"title": "新闻标题",
"content": "新闻内容",
"url": "新闻链接",
"image": "-",
},
inplace=True,
)
temp_df["关键词"] = symbol
temp_df = temp_df[
[
"关键词",
"新闻标题",
"新闻内容",
"发布时间",
"文章来源",
"新闻链接",
]
]
temp_df["新闻标题"] = (
temp_df["新闻标题"]
.str.replace(r"\(<em>", "", regex=True)
.str.replace(r"</em>\)", "", regex=True)
)
temp_df["新闻标题"] = (
temp_df["新闻标题"]
.str.replace(r"<em>", "", regex=True)
.str.replace(r"</em>", "", regex=True)
)
temp_df["新闻内容"] = (
temp_df["新闻内容"]
.str.replace(r"\(<em>", "", regex=True)
.str.replace(r"</em>\)", "", regex=True)
)
temp_df["新闻内容"] = (
temp_df["新闻内容"]
.str.replace(r"<em>", "", regex=True)
.str.replace(r"</em>", "", regex=True)
)
temp_df["新闻内容"] = temp_df["新闻内容"].str.replace(r"\u3000", "", regex=True)
temp_df["新闻内容"] = temp_df["新闻内容"].str.replace(r"\r\n", " ", regex=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_stock.py#L14-L82
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 13.043478 |
[
9,
10,
17,
18,
19,
22,
23,
35,
36,
46,
51,
56,
61,
66,
67,
68
] | 23.188406 | false | 25 | 69 | 1 | 76.811594 | 6 |
def stock_news_em(symbol: str = "601628") -> pd.DataFrame:
url = "https://search-api-web.eastmoney.com/search/jsonp"
params = {
"cb": "jQuery3510875346244069884_1668256937995",
"param": '{"uid":"",'
+ f'"keyword":"{symbol}"'
+ ',"type":["cmsArticleWebOld"],"client":"web","clientType":"web","clientVersion":"curr","param":{"cmsArticleWebOld":{"searchScope":"default","sort":"default","pageIndex":1,"pageSize":100,"preTag":"<em>","postTag":"</em>"}}}',
"_": "1668256937996",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(
data_text.strip("jQuery3510875346244069884_1668256937995(")[:-1]
)
temp_df = pd.DataFrame(data_json["result"]["cmsArticleWebOld"])
temp_df.rename(
columns={
"date": "发布时间",
"mediaName": "文章来源",
"code": "-",
"title": "新闻标题",
"content": "新闻内容",
"url": "新闻链接",
"image": "-",
},
inplace=True,
)
temp_df["关键词"] = symbol
temp_df = temp_df[
[
"关键词",
"新闻标题",
"新闻内容",
"发布时间",
"文章来源",
"新闻链接",
]
]
temp_df["新闻标题"] = (
temp_df["新闻标题"]
.str.replace(r"\(<em>", "", regex=True)
.str.replace(r"</em>\)", "", regex=True)
)
temp_df["新闻标题"] = (
temp_df["新闻标题"]
.str.replace(r"<em>", "", regex=True)
.str.replace(r"</em>", "", regex=True)
)
temp_df["新闻内容"] = (
temp_df["新闻内容"]
.str.replace(r"\(<em>", "", regex=True)
.str.replace(r"</em>\)", "", regex=True)
)
temp_df["新闻内容"] = (
temp_df["新闻内容"]
.str.replace(r"<em>", "", regex=True)
.str.replace(r"</em>", "", regex=True)
)
temp_df["新闻内容"] = temp_df["新闻内容"].str.replace(r"\u3000", "", regex=True)
temp_df["新闻内容"] = temp_df["新闻内容"].str.replace(r"\r\n", " ", regex=True)
return temp_df
| 18,757 |
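A minimal usage sketch for stock_news_em above, assuming akshare is installed and the eastmoney search endpoint is reachable:

from akshare.news.news_stock import stock_news_em

news_df = stock_news_em(symbol="601628")
print(news_df[["新闻标题", "发布时间", "文章来源"]].head())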
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_baidu.py
|
news_economic_baidu
|
(date: str = "20220502")
|
return big_df
|
百度股市通-经济数据
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 经济数据
:rtype: pandas.DataFrame
|
百度股市通-经济数据
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 经济数据
:rtype: pandas.DataFrame
| 12 | 73 |
def news_economic_baidu(date: str = "20220502") -> pd.DataFrame:
"""
百度股市通-经济数据
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 经济数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "economic_data",
'rn': '500',
'pn': '0',
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"日期",
"时间",
"-",
"事件",
"重要性",
"前值",
"预期",
"公布",
"-",
"-",
"地区",
"-",
]
temp_df = temp_df[
[
"日期",
"时间",
"地区",
"事件",
"公布",
"预期",
"前值",
"重要性",
]
]
temp_df["公布"] = pd.to_numeric(temp_df["公布"], errors="coerce")
temp_df["预期"] = pd.to_numeric(temp_df["预期"], errors="coerce")
temp_df["前值"] = pd.to_numeric(temp_df["前值"], errors="coerce")
temp_df["重要性"] = pd.to_numeric(temp_df["重要性"], errors="coerce")
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_baidu.py#L12-L73
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 14.516129 |
[
9,
10,
11,
12,
20,
21,
22,
23,
24,
25,
26,
40,
52,
53,
54,
55,
56,
58,
60,
61
] | 32.258065 | false | 9.411765 | 62 | 3 | 67.741935 | 6 |
def news_economic_baidu(date: str = "20220502") -> pd.DataFrame:
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "economic_data",
'rn': '500',
'pn': '0',
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"日期",
"时间",
"-",
"事件",
"重要性",
"前值",
"预期",
"公布",
"-",
"-",
"地区",
"-",
]
temp_df = temp_df[
[
"日期",
"时间",
"地区",
"事件",
"公布",
"预期",
"前值",
"重要性",
]
]
temp_df["公布"] = pd.to_numeric(temp_df["公布"], errors="coerce")
temp_df["预期"] = pd.to_numeric(temp_df["预期"], errors="coerce")
temp_df["前值"] = pd.to_numeric(temp_df["前值"], errors="coerce")
temp_df["重要性"] = pd.to_numeric(temp_df["重要性"], errors="coerce")
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
| 18,758 |
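A minimal usage sketch for news_economic_baidu above; the sibling notify/report functions in the following records follow the same call pattern with a different cate value. The result can be empty if no events fall on the queried date:

from akshare.news.news_baidu import news_economic_baidu

econ_df = news_economic_baidu(date="20220502")
if not econ_df.empty:
    print(econ_df[["日期", "事件", "公布", "预期"]].head())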
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_baidu.py
|
news_trade_notify_suspend_baidu
|
(date: str = "20220513")
|
return big_df
|
百度股市通-交易提醒-停复牌
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 交易提醒-停复牌
:rtype: pandas.DataFrame
|
百度股市通-交易提醒-停复牌
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 交易提醒-停复牌
:rtype: pandas.DataFrame
| 76 | 125 |
def news_trade_notify_suspend_baidu(date: str = "20220513") -> pd.DataFrame:
"""
百度股市通-交易提醒-停复牌
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 交易提醒-停复牌
:rtype: pandas.DataFrame
"""
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "notify_suspend",
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"股票简称",
"停牌时间",
"复牌时间",
"-",
"停牌事项说明",
]
temp_df = temp_df[
[
"股票代码",
"股票简称",
"交易所",
"停牌时间",
"复牌时间",
"停牌事项说明",
]
]
temp_df["停牌时间"] = pd.to_datetime(temp_df["停牌时间"]).dt.date
temp_df["复牌时间"] = pd.to_datetime(temp_df["复牌时间"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_baidu.py#L76-L125
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18 |
[
9,
10,
11,
12,
18,
19,
20,
21,
22,
23,
24,
34,
44,
45,
46,
48,
49
] | 34 | false | 9.411765 | 50 | 3 | 66 | 6 |
def news_trade_notify_suspend_baidu(date: str = "20220513") -> pd.DataFrame:
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "notify_suspend",
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"股票简称",
"停牌时间",
"复牌时间",
"-",
"停牌事项说明",
]
temp_df = temp_df[
[
"股票代码",
"股票简称",
"交易所",
"停牌时间",
"复牌时间",
"停牌事项说明",
]
]
temp_df["停牌时间"] = pd.to_datetime(temp_df["停牌时间"]).dt.date
temp_df["复牌时间"] = pd.to_datetime(temp_df["复牌时间"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
| 18,759 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_baidu.py
|
news_trade_notify_dividend_baidu
|
(date: str = "20220916")
|
return big_df
|
百度股市通-交易提醒-分红派息
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 交易提醒-分红派息
:rtype: pandas.DataFrame
|
百度股市通-交易提醒-分红派息
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 交易提醒-分红派息
:rtype: pandas.DataFrame
| 128 | 182 |
def news_trade_notify_dividend_baidu(date: str = "20220916") -> pd.DataFrame:
"""
百度股市通-交易提醒-分红派息
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
    :return: 交易提醒-分红派息
:rtype: pandas.DataFrame
"""
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "notify_divide",
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"股票简称",
"除权日",
"报告期",
"分红",
"送股",
"转增",
"实物",
]
temp_df = temp_df[
[
"股票代码",
"除权日",
"分红",
"送股",
"转增",
"实物",
"交易所",
"股票简称",
"报告期",
]
]
temp_df["除权日"] = pd.to_datetime(temp_df["除权日"]).dt.date
temp_df["报告期"] = pd.to_datetime(temp_df["报告期"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_baidu.py#L128-L182
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.363636 |
[
9,
10,
11,
12,
18,
19,
20,
21,
22,
23,
24,
36,
49,
50,
51,
53,
54
] | 30.909091 | false | 9.411765 | 55 | 3 | 69.090909 | 6 |
def news_trade_notify_dividend_baidu(date: str = "20220916") -> pd.DataFrame:
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "notify_divide",
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"股票简称",
"除权日",
"报告期",
"分红",
"送股",
"转增",
"实物",
]
temp_df = temp_df[
[
"股票代码",
"除权日",
"分红",
"送股",
"转增",
"实物",
"交易所",
"股票简称",
"报告期",
]
]
temp_df["除权日"] = pd.to_datetime(temp_df["除权日"]).dt.date
temp_df["报告期"] = pd.to_datetime(temp_df["报告期"]).dt.date
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
| 18,760 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/news/news_baidu.py
|
news_report_time_baidu
|
(date: str = "20220514")
|
return big_df
|
百度股市通-财报发行
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 财报发行
:rtype: pandas.DataFrame
|
百度股市通-财报发行
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 财报发行
:rtype: pandas.DataFrame
| 185 | 230 |
def news_report_time_baidu(date: str = "20220514") -> pd.DataFrame:
"""
百度股市通-财报发行
https://gushitong.baidu.com/calendar
:param date: 查询日期
:type date: str
:return: 财报发行
:rtype: pandas.DataFrame
"""
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "report_time",
'finClientType': 'pc',
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"-",
"股票简称",
"-",
"财报期",
]
temp_df = temp_df[
[
"股票代码",
"交易所",
"股票简称",
"财报期",
]
]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/news/news_baidu.py#L185-L230
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 19.565217 |
[
9,
10,
11,
12,
19,
20,
21,
22,
23,
24,
25,
34,
42,
44,
45
] | 32.608696 | false | 9.411765 | 46 | 3 | 67.391304 | 6 |
def news_report_time_baidu(date: str = "20220514") -> pd.DataFrame:
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "report_time",
'finClientType': 'pc',
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"股票代码",
"-",
"交易所",
"-",
"股票简称",
"-",
"财报期",
]
temp_df = temp_df[
[
"股票代码",
"交易所",
"股票简称",
"财报期",
]
]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
else:
continue
return big_df
| 18,761 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dividents_cninfo.py
|
stock_dividents_cninfo
|
(symbol: str = "600009")
|
return temp_df
|
巨潮资讯-个股-历史分红
http://webapi.cninfo.com.cn/#/company?companyid=600009
:param symbol: 股票代码
:type symbol: str
:return: 历史分红
:rtype: pandas.DataFrame
|
巨潮资讯-个股-历史分红
http://webapi.cninfo.com.cn/#/company?companyid=600009
:param symbol: 股票代码
:type symbol: str
:return: 历史分红
:rtype: pandas.DataFrame
| 45 | 100 |
def stock_dividents_cninfo(symbol: str = "600009") -> pd.DataFrame:
"""
巨潮资讯-个股-历史分红
http://webapi.cninfo.com.cn/#/company?companyid=600009
:param symbol: 股票代码
:type symbol: str
:return: 历史分红
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1139"
params = {
'scode': symbol
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"实施方案公告日期",
"送股比例",
"转增比例",
"派息比例",
"股权登记日",
"除权日",
"派息日",
"股份到账日",
"实施方案分红说明",
"分红类型",
"报告时间",
]
temp_df["实施方案公告日期"] = pd.to_datetime(temp_df["实施方案公告日期"]).dt.date
temp_df["送股比例"] = pd.to_numeric(temp_df["送股比例"], errors="coerce")
temp_df["转增比例"] = pd.to_numeric(temp_df["转增比例"], errors="coerce")
temp_df["派息比例"] = pd.to_numeric(temp_df["派息比例"], errors="coerce")
temp_df["股权登记日"] = pd.to_datetime(temp_df["股权登记日"], errors="coerce").dt.date
temp_df["除权日"] = pd.to_datetime(temp_df["除权日"], errors="coerce").dt.date
temp_df["派息日"] = pd.to_datetime(temp_df["派息日"], errors="coerce").dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dividents_cninfo.py#L45-L100
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.071429 |
[
9,
10,
13,
14,
15,
16,
17,
32,
33,
34,
35,
48,
49,
50,
51,
52,
53,
54,
55
] | 33.928571 | false | 27.586207 | 56 | 1 | 66.071429 | 6 |
def stock_dividents_cninfo(symbol: str = "600009") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1139"
params = {
'scode': symbol
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"实施方案公告日期",
"送股比例",
"转增比例",
"派息比例",
"股权登记日",
"除权日",
"派息日",
"股份到账日",
"实施方案分红说明",
"分红类型",
"报告时间",
]
temp_df["实施方案公告日期"] = pd.to_datetime(temp_df["实施方案公告日期"]).dt.date
temp_df["送股比例"] = pd.to_numeric(temp_df["送股比例"], errors="coerce")
temp_df["转增比例"] = pd.to_numeric(temp_df["转增比例"], errors="coerce")
temp_df["派息比例"] = pd.to_numeric(temp_df["派息比例"], errors="coerce")
temp_df["股权登记日"] = pd.to_datetime(temp_df["股权登记日"], errors="coerce").dt.date
temp_df["除权日"] = pd.to_datetime(temp_df["除权日"], errors="coerce").dt.date
temp_df["派息日"] = pd.to_datetime(temp_df["派息日"], errors="coerce").dt.date
return temp_df
| 18,762 |
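Hedged example for the dividend-history interface; the column names come from the rename in the function, and the aggregation is purely illustrative:

import akshare as ak

div_df = ak.stock_dividents_cninfo(symbol="600009")
# 派息比例 is already numeric after the pd.to_numeric conversion, so it can be summed directly
print(div_df[["实施方案公告日期", "派息比例", "分红类型"]].head())
print("累计派息比例:", div_df["派息比例"].sum())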
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_industry_cninfo.py
|
stock_industry_category_cninfo
|
(symbol: str = "巨潮行业分类标准") -> pd.DataFrame
|
return temp_df
|
巨潮资讯-行业分类数据
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_public0002 接口
:param symbol: 行业类型; choice of {"证监会行业分类标准", "巨潮行业分类标准", "申银万国行业分类标准", "新财富行业分类标准", "国资委行业分类标准", "巨潮产业细分标准", "天相行业分类标准", "全球行业分类标准"}
:type symbol: str
:return: 行业分类数据
:rtype: pandas.DataFrame
|
巨潮资讯-行业分类数据
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_public0002 接口
:param symbol: 行业类型; choice of {"证监会行业分类标准", "巨潮行业分类标准", "申银万国行业分类标准", "新财富行业分类标准", "国资委行业分类标准", "巨潮产业细分标准", "天相行业分类标准", "全球行业分类标准"}
:type symbol: str
:return: 行业分类数据
:rtype: pandas.DataFrame
| 47 | 115 |
def stock_industry_category_cninfo(symbol: str = "巨潮行业分类标准") -> pd.DataFrame:
"""
巨潮资讯-行业分类数据
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_public0002 接口
:param symbol: 行业类型; choice of {"证监会行业分类标准", "巨潮行业分类标准", "申银万国行业分类标准", "新财富行业分类标准", "国资委行业分类标准", "巨潮产业细分标准", "天相行业分类标准", "全球行业分类标准"}
:type symbol: str
:return: 行业分类数据
:rtype: pandas.DataFrame
"""
symbol_map = {
"证监会行业分类标准": "008001",
"巨潮行业分类标准": "008002",
"申银万国行业分类标准": "008003",
"新财富行业分类标准": "008004",
"国资委行业分类标准": "008005",
"巨潮产业细分标准": "008006",
"天相行业分类标准": "008007",
"全球行业分类标准": "008008",
}
url = "http://webapi.cninfo.com.cn/api/stock/p_public0002"
params = {"indcode": "", "indtype": symbol_map[symbol]}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"PARENTCODE": "父类编码",
"SORTCODE": "类目编码",
"SORTNAME": "类目名称",
"F001V": "类目名称英文",
"F002D": "终止日期",
"F003V": "行业类型编码",
"F004V": "行业类型",
}
temp_df.rename(columns=cols_map, inplace=True)
# 行业按分级排序
tmp = temp_df[["类目编码"]].copy()
tmp["len"] = temp_df["类目编码"].str.len()
tmp["Level"] = 0
g = tmp.groupby("len")
level = 0
for k in g.groups.keys():
temp_df.loc[
temp_df["类目编码"].isin(g.get_group(k)["类目编码"]), "Level"
] = level
level += 1
temp_df["Level"] = temp_df["Level"].astype(int)
temp_df.rename(columns={"Level": "分级"}, inplace=True)
temp_df["终止日期"] = pd.to_datetime(temp_df["终止日期"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_industry_cninfo.py#L47-L115
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 14.492754 |
[
10,
20,
21,
22,
23,
24,
25,
26,
41,
42,
43,
44,
53,
55,
56,
57,
58,
59,
60,
61,
64,
65,
66,
67,
68
] | 36.231884 | false | 17.857143 | 69 | 2 | 63.768116 | 7 |
def stock_industry_category_cninfo(symbol: str = "巨潮行业分类标准") -> pd.DataFrame:
symbol_map = {
"证监会行业分类标准": "008001",
"巨潮行业分类标准": "008002",
"申银万国行业分类标准": "008003",
"新财富行业分类标准": "008004",
"国资委行业分类标准": "008005",
"巨潮产业细分标准": "008006",
"天相行业分类标准": "008007",
"全球行业分类标准": "008008",
}
url = "http://webapi.cninfo.com.cn/api/stock/p_public0002"
params = {"indcode": "", "indtype": symbol_map[symbol]}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"PARENTCODE": "父类编码",
"SORTCODE": "类目编码",
"SORTNAME": "类目名称",
"F001V": "类目名称英文",
"F002D": "终止日期",
"F003V": "行业类型编码",
"F004V": "行业类型",
}
temp_df.rename(columns=cols_map, inplace=True)
# 行业按分级排序
tmp = temp_df[["类目编码"]].copy()
tmp["len"] = temp_df["类目编码"].str.len()
tmp["Level"] = 0
g = tmp.groupby("len")
level = 0
for k in g.groups.keys():
temp_df.loc[
temp_df["类目编码"].isin(g.get_group(k)["类目编码"]), "Level"
] = level
level += 1
temp_df["Level"] = temp_df["Level"].astype(int)
temp_df.rename(columns={"Level": "分级"}, inplace=True)
temp_df["终止日期"] = pd.to_datetime(temp_df["终止日期"]).dt.date
return temp_df
| 18,763 |
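A sketch that requests one of the other documented classification standards and uses the 分级 column computed above; the package-level export ak.stock_industry_category_cninfo is assumed:

import akshare as ak

cat_df = ak.stock_industry_category_cninfo(symbol="申银万国行业分类标准")
# the shortest codes form level 0 after the grouping by code length, i.e. the top of the hierarchy
top_level = cat_df[cat_df["分级"] == 0]
print(top_level[["类目编码", "类目名称"]])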
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_industry_cninfo.py
|
stock_industry_change_cninfo
|
(
symbol: str = "002594",
start_date: str = "20091227",
end_date: str = "20220713",
)
|
return data_df
|
巨潮资讯-上市公司行业归属的变动情况
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_stock2110 接口
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始变动日期
:type start_date: str
:param end_date: 结束变动日期
:type end_date: str
:return: 行业归属的变动情况
:rtype: pandas.DataFrame
|
巨潮资讯-上市公司行业归属的变动情况
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_stock2110 接口
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始变动日期
:type start_date: str
:param end_date: 结束变动日期
:type end_date: str
:return: 行业归属的变动情况
:rtype: pandas.DataFrame
| 118 | 183 |
def stock_industry_change_cninfo(
symbol: str = "002594",
start_date: str = "20091227",
end_date: str = "20220713",
) -> pd.DataFrame:
"""
巨潮资讯-上市公司行业归属的变动情况
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_stock2110 接口
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始变动日期
:type start_date: str
:param end_date: 结束变动日期
:type end_date: str
:return: 行业归属的变动情况
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2110"
params = {
"scode": symbol,
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"ORGNAME": "机构名称",
"SECCODE": "证券代码",
"SECNAME": "新证券简称",
"VARYDATE": "变更日期",
"F001V": "分类标准编码",
"F002V": "分类标准",
"F003V": "行业编码",
"F004V": "行业门类",
"F005V": "行业次类",
"F006V": "行业大类",
"F007V": "行业中类",
"F008C": "最新记录标识",
}
ignore_cols = ["最新记录标识"]
temp_df.rename(columns=cols_map, inplace=True)
temp_df.fillna(np.nan, inplace=True)
temp_df["变更日期"] = pd.to_datetime(temp_df["变更日期"]).dt.date
data_df = temp_df[[c for c in temp_df.columns if c not in ignore_cols]]
return data_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_industry_cninfo.py#L118-L183
| 25 |
[
0
] | 1.515152 |
[
18,
19,
24,
25,
26,
27,
28,
43,
44,
45,
46,
60,
61,
62,
63,
64,
65
] | 25.757576 | false | 17.857143 | 66 | 2 | 74.242424 | 11 |
def stock_industry_change_cninfo(
symbol: str = "002594",
start_date: str = "20091227",
end_date: str = "20220713",
) -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2110"
params = {
"scode": symbol,
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"ORGNAME": "机构名称",
"SECCODE": "证券代码",
"SECNAME": "新证券简称",
"VARYDATE": "变更日期",
"F001V": "分类标准编码",
"F002V": "分类标准",
"F003V": "行业编码",
"F004V": "行业门类",
"F005V": "行业次类",
"F006V": "行业大类",
"F007V": "行业中类",
"F008C": "最新记录标识",
}
ignore_cols = ["最新记录标识"]
temp_df.rename(columns=cols_map, inplace=True)
temp_df.fillna(np.nan, inplace=True)
temp_df["变更日期"] = pd.to_datetime(temp_df["变更日期"]).dt.date
data_df = temp_df[[c for c in temp_df.columns if c not in ignore_cols]]
return data_df
| 18,764 |
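Usage sketch for the industry-change interface with its documented defaults; dates are plain YYYYMMDD strings as in the docstring, and the call requires the cninfo endpoint to be live:

import akshare as ak

chg_df = ak.stock_industry_change_cninfo(
    symbol="002594", start_date="20091227", end_date="20220713"
)
# one row per classification standard and change date
print(chg_df.sort_values("变更日期")[["变更日期", "分类标准", "行业门类", "行业大类"]])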
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_new_cninfo.py
|
stock_new_gh_cninfo
|
()
|
return temp_df
|
巨潮资讯-数据中心-新股数据-新股过会
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股过会
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-新股数据-新股过会
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股过会
:rtype: pandas.DataFrame
| 45 | 85 |
def stock_new_gh_cninfo() -> pd.DataFrame:
"""
巨潮资讯-数据中心-新股数据-新股过会
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股过会
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1098"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公司名称",
"上会日期",
"审核类型",
"审议内容",
"审核结果",
"审核公告日",
]
temp_df["上会日期"] = pd.to_datetime(temp_df["上会日期"]).dt.date
temp_df["审核公告日"] = pd.to_datetime(temp_df["审核公告日"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_new_cninfo.py#L45-L85
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 17.073171 |
[
7,
8,
9,
10,
11,
12,
27,
28,
29,
30,
38,
39,
40
] | 31.707317 | false | 18 | 41 | 1 | 68.292683 | 4 |
def stock_new_gh_cninfo() -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1098"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公司名称",
"上会日期",
"审核类型",
"审议内容",
"审核结果",
"审核公告日",
]
temp_df["上会日期"] = pd.to_datetime(temp_df["上会日期"]).dt.date
temp_df["审核公告日"] = pd.to_datetime(temp_df["审核公告日"]).dt.date
return temp_df
| 18,765 |
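Minimal sketch for the IPO-approval (新股过会) interface; no parameters are documented, so it is called bare:

import akshare as ak

gh_df = ak.stock_new_gh_cninfo()
print(gh_df.head())
print(gh_df["审核结果"].value_counts())  # distribution of review outcomes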
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_new_cninfo.py
|
stock_new_ipo_cninfo
|
()
|
return temp_df
|
巨潮资讯-数据中心-新股数据-新股发行
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股发行
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-新股数据-新股发行
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股发行
:rtype: pandas.DataFrame
| 88 | 165 |
def stock_new_ipo_cninfo() -> pd.DataFrame:
"""
巨潮资讯-数据中心-新股数据-新股发行
http://webapi.cninfo.com.cn/#/xinguList
:return: 新股发行
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1097"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"timetype": "36",
"market": "ALL",
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"摇号结果公告日",
"中签公告日",
"证券简称",
"上市日期",
"中签缴款日",
"申购日期",
"发行价",
"证劵代码",
"上网发行中签率",
"总发行数量",
"发行市盈率",
"上网发行数量",
"网上申购上限",
]
temp_df = temp_df[
[
"证劵代码",
"证券简称",
"上市日期",
"申购日期",
"发行价",
"总发行数量",
"发行市盈率",
"上网发行中签率",
"摇号结果公告日",
"中签公告日",
"中签缴款日",
"网上申购上限",
"上网发行数量",
]
]
temp_df["摇号结果公告日"] = pd.to_datetime(temp_df["摇号结果公告日"]).dt.date
temp_df["中签公告日"] = pd.to_datetime(temp_df["中签公告日"]).dt.date
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
temp_df["中签缴款日"] = pd.to_datetime(temp_df["中签缴款日"]).dt.date
temp_df["申购日期"] = pd.to_datetime(temp_df["申购日期"]).dt.date
temp_df["发行价"] = pd.to_numeric(temp_df["发行价"])
temp_df["上网发行中签率"] = pd.to_numeric(temp_df["上网发行中签率"])
temp_df["总发行数量"] = pd.to_numeric(temp_df["总发行数量"])
temp_df["发行市盈率"] = pd.to_numeric(temp_df["发行市盈率"])
temp_df["上网发行数量"] = pd.to_numeric(temp_df["上网发行数量"])
temp_df["网上申购上限"] = pd.to_numeric(temp_df["网上申购上限"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_new_cninfo.py#L88-L165
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.974359 |
[
7,
8,
9,
10,
11,
12,
27,
31,
32,
33,
34,
49,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77
] | 30.769231 | false | 18 | 78 | 1 | 69.230769 | 4 |
def stock_new_ipo_cninfo() -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1097"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"timetype": "36",
"market": "ALL",
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"摇号结果公告日",
"中签公告日",
"证券简称",
"上市日期",
"中签缴款日",
"申购日期",
"发行价",
"证劵代码",
"上网发行中签率",
"总发行数量",
"发行市盈率",
"上网发行数量",
"网上申购上限",
]
temp_df = temp_df[
[
"证劵代码",
"证券简称",
"上市日期",
"申购日期",
"发行价",
"总发行数量",
"发行市盈率",
"上网发行中签率",
"摇号结果公告日",
"中签公告日",
"中签缴款日",
"网上申购上限",
"上网发行数量",
]
]
temp_df["摇号结果公告日"] = pd.to_datetime(temp_df["摇号结果公告日"]).dt.date
temp_df["中签公告日"] = pd.to_datetime(temp_df["中签公告日"]).dt.date
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
temp_df["中签缴款日"] = pd.to_datetime(temp_df["中签缴款日"]).dt.date
temp_df["申购日期"] = pd.to_datetime(temp_df["申购日期"]).dt.date
temp_df["发行价"] = pd.to_numeric(temp_df["发行价"])
temp_df["上网发行中签率"] = pd.to_numeric(temp_df["上网发行中签率"])
temp_df["总发行数量"] = pd.to_numeric(temp_df["总发行数量"])
temp_df["发行市盈率"] = pd.to_numeric(temp_df["发行市盈率"])
temp_df["上网发行数量"] = pd.to_numeric(temp_df["上网发行数量"])
temp_df["网上申购上限"] = pd.to_numeric(temp_df["网上申购上限"])
return temp_df
| 18,766 |
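Sketch for the new-issue (新股发行) interface; the sort by listing date is only for illustration. Note the function keeps the source's original column spelling 证劵代码 for the code column:

import akshare as ak

ipo_df = ak.stock_new_ipo_cninfo()
# most recent listings first; 上市日期 is already a date column
recent = ipo_df.sort_values("上市日期", ascending=False).head(10)
print(recent[["证券简称", "上市日期", "发行价", "发行市盈率"]])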
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_pink.py
|
stock_us_pink_spot_em
|
()
|
return temp_df
|
东方财富网-行情中心-美股市场-粉单市场
http://quote.eastmoney.com/center/gridlist.html#us_pinksheet
:return: 粉单市场实时行情
:rtype: pandas.DataFrame
|
东方财富网-行情中心-美股市场-粉单市场
http://quote.eastmoney.com/center/gridlist.html#us_pinksheet
:return: 粉单市场实时行情
:rtype: pandas.DataFrame
| 12 | 100 |
def stock_us_pink_spot_em() -> pd.DataFrame:
"""
东方财富网-行情中心-美股市场-粉单市场
http://quote.eastmoney.com/center/gridlist.html#us_pinksheet
:return: 粉单市场实时行情
:rtype: pandas.DataFrame
"""
url = "http://23.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:153",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1631271634231",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_pink.py#L12-L100
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 7.865169 |
[
7,
8,
21,
22,
23,
24,
59,
60,
61,
62,
63,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88
] | 23.595506 | false | 17.857143 | 89 | 1 | 76.404494 | 4 |
def stock_us_pink_spot_em() -> pd.DataFrame:
url = "http://23.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:153",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1631271634231",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
return temp_df
| 18,767 |
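Hedged example for the pink-sheet spot quotes; the top-movers filter is illustrative and needs the Eastmoney endpoint to respond as it did at this snapshot:

import akshare as ak

pink_df = ak.stock_us_pink_spot_em()
# ten biggest gainers by 涨跌幅 (numeric column; NaN rows are ignored by nlargest)
print(pink_df.nlargest(10, "涨跌幅")[["代码", "名称", "最新价", "涨跌幅"]])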
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_cg_equity_mortgage.py
|
stock_cg_equity_mortgage_cninfo
|
(date: str = "20210930")
|
return temp_df
|
巨潮资讯-数据中心-专题统计-公司治理-股权质押
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: 开始统计时间
:type date: str
:return: 股权质押
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-公司治理-股权质押
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: 开始统计时间
:type date: str
:return: 股权质押
:rtype: pandas.DataFrame
| 45 | 111 |
def stock_cg_equity_mortgage_cninfo(date: str = "20210930") -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-公司治理-股权质押
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: 开始统计时间
:type date: str
:return: 股权质押
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1094"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"tdate": "-".join([date[:4], date[4:6], date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"质押解除数量",
"股票简称",
"公告日期",
"质押事项",
"质权人",
"出质人",
"股票代码",
"占总股本比例",
"累计质押占总股本比例",
"质押数量",
]
temp_df = temp_df[
[
"股票代码",
"股票简称",
"公告日期",
"出质人",
"质权人",
"质押数量",
"占总股本比例",
"质押解除数量",
"质押事项",
"累计质押占总股本比例",
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["质押数量"] = pd.to_numeric(temp_df["质押数量"])
temp_df["占总股本比例"] = pd.to_numeric(temp_df["占总股本比例"])
temp_df["质押解除数量"] = pd.to_numeric(temp_df["质押解除数量"])
temp_df["累计质押占总股本比例"] = pd.to_numeric(temp_df["累计质押占总股本比例"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_cg_equity_mortgage.py#L45-L111
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 13.432836 |
[
9,
10,
11,
12,
13,
14,
29,
32,
33,
34,
35,
47,
61,
62,
63,
64,
65,
66
] | 26.865672 | false | 28.571429 | 67 | 1 | 73.134328 | 6 |
def stock_cg_equity_mortgage_cninfo(date: str = "20210930") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1094"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"tdate": "-".join([date[:4], date[4:6], date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"质押解除数量",
"股票简称",
"公告日期",
"质押事项",
"质权人",
"出质人",
"股票代码",
"占总股本比例",
"累计质押占总股本比例",
"质押数量",
]
temp_df = temp_df[
[
"股票代码",
"股票简称",
"公告日期",
"出质人",
"质权人",
"质押数量",
"占总股本比例",
"质押解除数量",
"质押事项",
"累计质押占总股本比例",
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["质押数量"] = pd.to_numeric(temp_df["质押数量"])
temp_df["占总股本比例"] = pd.to_numeric(temp_df["占总股本比例"])
temp_df["质押解除数量"] = pd.to_numeric(temp_df["质押解除数量"])
temp_df["累计质押占总股本比例"] = pd.to_numeric(temp_df["累计质押占总股本比例"])
return temp_df
| 18,768 |
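Usage sketch for the equity-pledge interface; the date is the statistics date in YYYYMMDD form as documented, and the aggregation is just an example:

import akshare as ak

pledge_df = ak.stock_cg_equity_mortgage_cninfo(date="20210930")
# total pledged shares per stock on that date, largest first
print(pledge_df.groupby("股票简称")["质押数量"].sum().sort_values(ascending=False).head())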
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_industry.py
|
stock_sector_spot
|
(indicator: str = "新浪行业") -> pd.DataFrame
|
return temp_df
|
新浪行业-板块行情
http://finance.sina.com.cn/stock/sl/
:param indicator: choice of {"新浪行业", "启明星行业", "概念", "地域", "行业"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
|
新浪行业-板块行情
http://finance.sina.com.cn/stock/sl/
:param indicator: choice of {"新浪行业", "启明星行业", "概念", "地域", "行业"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
| 18 | 79 |
def stock_sector_spot(indicator: str = "新浪行业") -> pd.DataFrame:
"""
新浪行业-板块行情
http://finance.sina.com.cn/stock/sl/
:param indicator: choice of {"新浪行业", "启明星行业", "概念", "地域", "行业"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
"""
if indicator == "新浪行业":
url = "http://vip.stock.finance.sina.com.cn/q/view/newSinaHy.php"
r = requests.get(url)
if indicator == "启明星行业":
url = "http://biz.finance.sina.com.cn/hq/qmxIndustryHq.php"
r = requests.get(url)
r.encoding = "gb2312"
if indicator == "概念":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "class"
}
r = requests.get(url, params=params)
if indicator == "地域":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "area"
}
r = requests.get(url, params=params)
if indicator == "行业":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "industry"
}
r = requests.get(url, params=params)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):])
temp_df = pd.DataFrame([value.split(",") for key, value in json_data.items()])
temp_df.columns = [
"label",
"板块",
"公司家数",
"平均价格",
"涨跌额",
"涨跌幅",
"总成交量",
"总成交额",
"股票代码",
"个股-涨跌幅",
"个股-当前价",
"个股-涨跌额",
"股票名称",
]
temp_df['公司家数'] = pd.to_numeric(temp_df['公司家数'])
temp_df['平均价格'] = pd.to_numeric(temp_df['平均价格'])
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'])
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['总成交量'] = pd.to_numeric(temp_df['总成交量'])
temp_df['总成交额'] = pd.to_numeric(temp_df['总成交额'])
temp_df['个股-涨跌幅'] = pd.to_numeric(temp_df['个股-涨跌幅'])
temp_df['个股-当前价'] = pd.to_numeric(temp_df['个股-当前价'])
temp_df['个股-涨跌额'] = pd.to_numeric(temp_df['个股-涨跌额'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_industry.py#L18-L79
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 14.516129 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
21,
22,
23,
24,
27,
28,
29,
30,
33,
34,
35,
36,
37,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61
] | 53.225806 | false | 11.627907 | 62 | 7 | 46.774194 | 6 |
def stock_sector_spot(indicator: str = "新浪行业") -> pd.DataFrame:
if indicator == "新浪行业":
url = "http://vip.stock.finance.sina.com.cn/q/view/newSinaHy.php"
r = requests.get(url)
if indicator == "启明星行业":
url = "http://biz.finance.sina.com.cn/hq/qmxIndustryHq.php"
r = requests.get(url)
r.encoding = "gb2312"
if indicator == "概念":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "class"
}
r = requests.get(url, params=params)
if indicator == "地域":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "area"
}
r = requests.get(url, params=params)
if indicator == "行业":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "industry"
}
r = requests.get(url, params=params)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):])
temp_df = pd.DataFrame([value.split(",") for key, value in json_data.items()])
temp_df.columns = [
"label",
"板块",
"公司家数",
"平均价格",
"涨跌额",
"涨跌幅",
"总成交量",
"总成交额",
"股票代码",
"个股-涨跌幅",
"个股-当前价",
"个股-涨跌额",
"股票名称",
]
temp_df['公司家数'] = pd.to_numeric(temp_df['公司家数'])
temp_df['平均价格'] = pd.to_numeric(temp_df['平均价格'])
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'])
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['总成交量'] = pd.to_numeric(temp_df['总成交量'])
temp_df['总成交额'] = pd.to_numeric(temp_df['总成交额'])
temp_df['个股-涨跌幅'] = pd.to_numeric(temp_df['个股-涨跌幅'])
temp_df['个股-当前价'] = pd.to_numeric(temp_df['个股-当前价'])
temp_df['个股-涨跌额'] = pd.to_numeric(temp_df['个股-涨跌额'])
return temp_df
| 18,769 |
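Sketch showing how the label column returned here feeds stock_sector_detail below; the indicator values are the ones listed in the docstring, and the ak.-level export is assumed:

import akshare as ak

concept_df = ak.stock_sector_spot(indicator="概念")
print(concept_df[["label", "板块", "涨跌幅", "公司家数"]].head())
labels = concept_df["label"].tolist()  # values such as "gn_gfgn", usable as the sector argument below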
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_industry.py
|
stock_sector_detail
|
(sector: str = "gn_gfgn")
|
return big_df
|
新浪行业-板块行情-成份详情
http://finance.sina.com.cn/stock/sl/#area_1
:param sector: stock_sector_spot 返回的 label 值, choice of {"新浪行业", "概念", "地域", "行业"}; "启明星行业" 无详情
:type sector: str
:return: 指定 sector 的板块详情
:rtype: pandas.DataFrame
|
新浪行业-板块行情-成份详情
http://finance.sina.com.cn/stock/sl/#area_1
:param sector: stock_sector_spot 返回的 label 值, choice of {"新浪行业", "概念", "地域", "行业"}; "启明星行业" 无详情
:type sector: str
:return: 指定 sector 的板块详情
:rtype: pandas.DataFrame
| 82 | 131 |
def stock_sector_detail(sector: str = "gn_gfgn") -> pd.DataFrame:
"""
新浪行业-板块行情-成份详情
http://finance.sina.com.cn/stock/sl/#area_1
:param sector: stock_sector_spot 返回的 label 值, choice of {"新浪行业", "概念", "地域", "行业"}; "启明星行业" 无详情
:type sector: str
:return: 指定 sector 的板块详情
:rtype: pandas.DataFrame
"""
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount"
params = {
"node": sector
}
r = requests.get(url, params=params)
total_num = int(r.json())
total_page_num = math.ceil(int(total_num) / 80)
big_df = pd.DataFrame()
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
for page in tqdm(range(1, total_page_num+1), leave=True):
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": sector,
"symbol": "",
"_s_r_a": "page",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text)
temp_df = pd.DataFrame(data_json)
big_df = big_df.append(temp_df, ignore_index=True)
big_df['trade'] = pd.to_numeric(big_df['trade'])
big_df['pricechange'] = pd.to_numeric(big_df['pricechange'])
big_df['changepercent'] = pd.to_numeric(big_df['changepercent'])
big_df['buy'] = pd.to_numeric(big_df['buy'])
big_df['sell'] = pd.to_numeric(big_df['sell'])
big_df['settlement'] = pd.to_numeric(big_df['settlement'])
big_df['open'] = pd.to_numeric(big_df['open'])
big_df['high'] = pd.to_numeric(big_df['high'])
big_df['low'] = pd.to_numeric(big_df['low'])
big_df['volume'] = pd.to_numeric(big_df['volume'])
big_df['amount'] = pd.to_numeric(big_df['amount'])
big_df['per'] = pd.to_numeric(big_df['per'])
big_df['pb'] = pd.to_numeric(big_df['pb'])
big_df['mktcap'] = pd.to_numeric(big_df['mktcap'])
big_df['nmc'] = pd.to_numeric(big_df['nmc'])
big_df['turnoverratio'] = pd.to_numeric(big_df['turnoverratio'])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_industry.py#L82-L131
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18 |
[
9,
10,
13,
14,
15,
16,
17,
18,
19,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49
] | 62 | false | 11.627907 | 50 | 2 | 38 | 6 |
def stock_sector_detail(sector: str = "gn_gfgn") -> pd.DataFrame:
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount"
params = {
"node": sector
}
r = requests.get(url, params=params)
total_num = int(r.json())
total_page_num = math.ceil(int(total_num) / 80)
big_df = pd.DataFrame()
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
for page in tqdm(range(1, total_page_num+1), leave=True):
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": sector,
"symbol": "",
"_s_r_a": "page",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text)
temp_df = pd.DataFrame(data_json)
big_df = big_df.append(temp_df, ignore_index=True)
big_df['trade'] = pd.to_numeric(big_df['trade'])
big_df['pricechange'] = pd.to_numeric(big_df['pricechange'])
big_df['changepercent'] = pd.to_numeric(big_df['changepercent'])
big_df['buy'] = pd.to_numeric(big_df['buy'])
big_df['sell'] = pd.to_numeric(big_df['sell'])
big_df['settlement'] = pd.to_numeric(big_df['settlement'])
big_df['open'] = pd.to_numeric(big_df['open'])
big_df['high'] = pd.to_numeric(big_df['high'])
big_df['low'] = pd.to_numeric(big_df['low'])
big_df['volume'] = pd.to_numeric(big_df['volume'])
big_df['amount'] = pd.to_numeric(big_df['amount'])
big_df['per'] = pd.to_numeric(big_df['per'])
big_df['pb'] = pd.to_numeric(big_df['pb'])
big_df['mktcap'] = pd.to_numeric(big_df['mktcap'])
big_df['nmc'] = pd.to_numeric(big_df['nmc'])
big_df['turnoverratio'] = pd.to_numeric(big_df['turnoverratio'])
return big_df
| 18,770 |
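Hedged example for the constituents interface. One caveat when running this snapshot today: DataFrame.append, used in the loop above, was removed in pandas 2.x, so an older pandas (or swapping in pd.concat) may be needed.

import akshare as ak

detail_df = ak.stock_sector_detail(sector="gn_gfgn")
# constituents sorted by intraday change; column names follow the Sina payload (English keys)
top = detail_df.sort_values("changepercent", ascending=False)
print(top[["symbol", "name", "trade", "changepercent"]].head())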
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hk_sina.py
|
stock_hk_spot
|
()
|
return data_df
|
新浪财经-港股的所有港股的实时行情数据
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 实时行情数据
:rtype: pandas.DataFrame
|
新浪财经-港股的所有港股的实时行情数据
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 实时行情数据
:rtype: pandas.DataFrame
| 23 | 58 |
def stock_hk_spot() -> pd.DataFrame:
"""
新浪财经-港股的所有港股的实时行情数据
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 实时行情数据
:rtype: pandas.DataFrame
"""
res = requests.get(hk_sina_stock_list_url, params=hk_sina_stock_dict_payload)
data_json = [
demjson.decode(tt)
for tt in [
item + "}" for item in res.text[1:-1].split("},") if not item.endswith("}")
]
]
data_df = pd.DataFrame(data_json)
data_df = data_df[
[
"symbol",
"name",
"engname",
"tradetype",
"lasttrade",
"prevclose",
"open",
"high",
"low",
"volume",
"amount",
"ticktime",
"buy",
"sell",
"pricechange",
"changepercent",
]
]
return data_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hk_sina.py#L23-L58
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 19.444444 |
[
7,
8,
14,
15,
35
] | 13.888889 | false | 7.317073 | 36 | 3 | 86.111111 | 4 |
def stock_hk_spot() -> pd.DataFrame:
res = requests.get(hk_sina_stock_list_url, params=hk_sina_stock_dict_payload)
data_json = [
demjson.decode(tt)
for tt in [
item + "}" for item in res.text[1:-1].split("},") if not item.endswith("}")
]
]
data_df = pd.DataFrame(data_json)
data_df = data_df[
[
"symbol",
"name",
"engname",
"tradetype",
"lasttrade",
"prevclose",
"open",
"high",
"low",
"volume",
"amount",
"ticktime",
"buy",
"sell",
"pricechange",
"changepercent",
]
]
return data_df
| 18,771 |
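Minimal sketch for the HK spot snapshot; no arguments are documented and the columns are the Sina field names kept by the function:

import akshare as ak

hk_df = ak.stock_hk_spot()
print(hk_df[["symbol", "name", "lasttrade", "changepercent"]].head())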
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hk_sina.py
|
stock_hk_daily
|
(symbol: str = "00981", adjust: str = "")
|
新浪财经-港股-个股的历史行情数据
https://stock.finance.sina.com.cn/hkstock/quotes/02912.html
:param symbol: 可以使用 stock_hk_spot 获取
:type symbol: str
:param adjust: "": 返回未复权的数据 ; qfq: 返回前复权后的数据; qfq-factor: 返回前复权因子和调整;
:type adjust: str
:return: 指定 adjust 的数据
:rtype: pandas.DataFrame
|
新浪财经-港股-个股的历史行情数据
https://stock.finance.sina.com.cn/hkstock/quotes/02912.html
:param symbol: 可以使用 stock_hk_spot 获取
:type symbol: str
:param adjust: "": 返回未复权的数据 ; qfq: 返回前复权后的数据; qfq-factor: 返回前复权因子和调整;
:type adjust: str
:return: 指定 adjust 的数据
:rtype: pandas.DataFrame
| 61 | 203 |
def stock_hk_daily(symbol: str = "00981", adjust: str = "") -> pd.DataFrame:
"""
新浪财经-港股-个股的历史行情数据
https://stock.finance.sina.com.cn/hkstock/quotes/02912.html
:param symbol: 可以使用 stock_hk_spot 获取
:type symbol: str
:param adjust: "": 返回未复权的数据 ; qfq: 返回前复权后的数据; qfq-factor: 返回前复权因子和调整;
:type adjust: str
:return: 指定 adjust 的数据
:rtype: pandas.DataFrame
"""
res = requests.get(hk_sina_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
if adjust == "":
data_df.reset_index(inplace=True)
return data_df
if adjust == "hfq":
res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
try:
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if len(hfq_factor_df) == 1:
data_df.reset_index(inplace=True)
return data_df
except SyntaxError as e:
data_df.reset_index(inplace=True)
return data_df
hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
# 处理复权因子
temp_date_range = pd.date_range(
"1900-01-01", hfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1, 2]]
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df.dropna(how="any", inplace=True)
temp_df = temp_df.iloc[:, :-2]
temp_df.reset_index(inplace=True)
temp_df.rename({"index": "date"}, axis='columns', inplace=True)
temp_df['date'] = temp_df['date'].astype(str)
return temp_df
if adjust == "qfq":
res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
try:
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if len(qfq_factor_df) == 1:
data_df.reset_index(inplace=True)
return data_df
except SyntaxError as e:
data_df.reset_index(inplace=True)
return data_df
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_date_range = pd.date_range(
"1900-01-01", qfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1]]
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["qfq_factor"]
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df.dropna(how="any", inplace=True)
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df.rename({"index": "date"}, axis='columns', inplace=True)
temp_df['date'] = temp_df['date'].astype(str)
return temp_df
if adjust == "hfq-factor":
res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
hfq_factor_df['date'] = hfq_factor_df['date'].astype(str)
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
qfq_factor_df['date'] = qfq_factor_df['date'].astype(str)
return qfq_factor_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hk_sina.py#L61-L203
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 7.692308 |
[
11,
12,
13,
14,
17,
18,
19,
20,
22,
23,
24,
26,
27,
28,
29,
32,
33,
34,
35,
36,
37,
38,
39,
40,
43,
46,
47,
50,
51,
53,
56,
57,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
73,
74,
75,
76,
79,
80,
81,
83,
84,
85,
86,
87,
88,
90,
93,
94,
97,
98,
100,
103,
104,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
120,
121,
122,
125,
126,
127,
128,
129,
130,
132,
133,
134,
137,
138,
139,
140,
141,
142
] | 66.433566 | false | 7.317073 | 143 | 10 | 33.566434 | 8 |
def stock_hk_daily(symbol: str = "00981", adjust: str = "") -> pd.DataFrame:
res = requests.get(hk_sina_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
if adjust == "":
data_df.reset_index(inplace=True)
return data_df
if adjust == "hfq":
res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
try:
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if len(hfq_factor_df) == 1:
data_df.reset_index(inplace=True)
return data_df
except SyntaxError as e:
data_df.reset_index(inplace=True)
return data_df
hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
# 处理复权因子
temp_date_range = pd.date_range(
"1900-01-01", hfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1, 2]]
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"] + temp_df["cash"]
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df.dropna(how="any", inplace=True)
temp_df = temp_df.iloc[:, :-2]
temp_df.reset_index(inplace=True)
temp_df.rename({"index": "date"}, axis='columns', inplace=True)
temp_df['date'] = temp_df['date'].astype(str)
return temp_df
if adjust == "qfq":
res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
try:
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if len(qfq_factor_df) == 1:
data_df.reset_index(inplace=True)
return data_df
except SyntaxError as e:
data_df.reset_index(inplace=True)
return data_df
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_date_range = pd.date_range(
"1900-01-01", qfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1]]
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["qfq_factor"]
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df.dropna(how="any", inplace=True)
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df.rename({"index": "date"}, axis='columns', inplace=True)
temp_df['date'] = temp_df['date'].astype(str)
return temp_df
if adjust == "hfq-factor":
res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
hfq_factor_df['date'] = hfq_factor_df['date'].astype(str)
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
qfq_factor_df['date'] = qfq_factor_df['date'].astype(str)
return qfq_factor_df
| 18,772 |
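Usage sketch covering the adjust values. Note that the docstring lists "", qfq and qfq-factor, while the implementation above also handles hfq and hfq-factor; the ak.-level export is assumed.

import akshare as ak

raw_df = ak.stock_hk_daily(symbol="00981", adjust="")                # unadjusted OHLCV
qfq_df = ak.stock_hk_daily(symbol="00981", adjust="qfq")             # forward-adjusted prices
factor_df = ak.stock_hk_daily(symbol="00981", adjust="qfq-factor")   # adjustment factors only
print(raw_df.tail())
print(factor_df.head())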
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_js.py
|
stock_price_js
|
(symbol: str = "us")
|
return temp_df
|
美股目标价 or 港股目标价
https://www.ushknews.com/report.html
:param symbol: choice of {"us", "hk"}
:type symbol: str
:return: 美股目标价 or 港股目标价
:rtype: pandas.DataFrame
|
美股目标价 or 港股目标价
https://www.ushknews.com/report.html
:param symbol: choice of {"us", "hk"}
:type symbol: str
:return: 美股目标价 or 港股目标价
:rtype: pandas.DataFrame
| 12 | 74 |
def stock_price_js(symbol: str = "us") -> pd.DataFrame:
"""
美股目标价 or 港股目标价
https://www.ushknews.com/report.html
:param symbol: choice of {"us", "hk"}
:type symbol: str
:return: 美股目标价 or 港股目标价
:rtype: pandas.DataFrame
"""
url = "https://calendar-api.ushknews.com/getWebTargetPriceList"
params = {
'limit': '20',
'category': symbol,
}
headers = {
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://www.ushknews.com",
"pragma": "no-cache",
"referer": "https://www.ushknews.com/",
"sec-ch-ua": '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36",
"x-app-id": "BNsiR9uq7yfW0LVz",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
json_data = r.json()
temp_df = pd.DataFrame(json_data["data"]["list"])
temp_df.columns = [
"_",
"_",
"评级",
"_",
"最新目标价",
"先前目标价",
"机构名称",
"日期",
"_",
"个股名称",
"_",
"_",
]
temp_df = temp_df[
[
"日期",
"个股名称",
"评级",
"先前目标价",
"最新目标价",
"机构名称",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["先前目标价"] = pd.to_numeric(temp_df["先前目标价"], errors="coerce")
temp_df["最新目标价"] = pd.to_numeric(temp_df["最新目标价"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_js.py#L12-L74
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 14.285714 |
[
9,
10,
14,
32,
33,
34,
35,
49,
59,
60,
61,
62
] | 19.047619 | false | 23.809524 | 63 | 1 | 80.952381 | 6 |
def stock_price_js(symbol: str = "us") -> pd.DataFrame:
url = "https://calendar-api.ushknews.com/getWebTargetPriceList"
params = {
'limit': '20',
'category': symbol,
}
headers = {
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://www.ushknews.com",
"pragma": "no-cache",
"referer": "https://www.ushknews.com/",
"sec-ch-ua": '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36",
"x-app-id": "BNsiR9uq7yfW0LVz",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
json_data = r.json()
temp_df = pd.DataFrame(json_data["data"]["list"])
temp_df.columns = [
"_",
"_",
"评级",
"_",
"最新目标价",
"先前目标价",
"机构名称",
"日期",
"_",
"个股名称",
"_",
"_",
]
temp_df = temp_df[
[
"日期",
"个股名称",
"评级",
"先前目标价",
"最新目标价",
"机构名称",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["先前目标价"] = pd.to_numeric(temp_df["先前目标价"], errors="coerce")
temp_df["最新目标价"] = pd.to_numeric(temp_df["最新目标价"], errors="coerce")
return temp_df
| 18,773 |
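Sketch for the target-price interface with both documented symbol values; it assumes the ushknews endpoint still answers as above:

import akshare as ak

us_df = ak.stock_price_js(symbol="us")
hk_df = ak.stock_price_js(symbol="hk")
print(us_df.head())
print(hk_df[["日期", "个股名称", "最新目标价", "机构名称"]].head())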
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_industry_pe_cninfo.py
|
stock_industry_pe_ratio_cninfo
|
(symbol: str = "证监会行业分类", date: str = "20210910") -> pd.DataFrame
|
return temp_df
|
巨潮资讯-数据中心-行业分析-行业市盈率
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"证监会行业分类", "国证行业分类"}
:type symbol: str
:param date: 查询日期
:type date: str
:return: 行业市盈率
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-行业分析-行业市盈率
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"证监会行业分类", "国证行业分类"}
:type symbol: str
:param date: 查询日期
:type date: str
:return: 行业市盈率
:rtype: pandas.DataFrame
| 45 | 122 |
def stock_industry_pe_ratio_cninfo(symbol: str = "证监会行业分类", date: str = "20210910") -> pd.DataFrame:
"""
巨潮资讯-数据中心-行业分析-行业市盈率
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"证监会行业分类", "国证行业分类"}
:type symbol: str
:param date: 查询日期
:type date: str
:return: 行业市盈率
:rtype: pandas.DataFrame
"""
sort_code_map = {
"证监会行业分类": "008001",
"国证行业分类": "008200"
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1087"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]]),
"sortcode": sort_code_map[symbol],
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"行业层级",
"静态市盈率-算术平均",
"静态市盈率-中位数",
"静态市盈率-加权平均",
"净利润-静态",
"行业名称",
"行业编码",
"行业分类",
"总市值-静态",
"纳入计算公司数量",
"变动日期",
"公司数量",
]
temp_df = temp_df[[
"变动日期",
"行业分类",
"行业层级",
"行业编码",
"行业名称",
"公司数量",
"纳入计算公司数量",
"总市值-静态",
"净利润-静态",
"静态市盈率-加权平均",
"静态市盈率-中位数",
"静态市盈率-算术平均",
]]
temp_df["行业层级"] = pd.to_numeric(temp_df["行业层级"], errors="coerce")
temp_df["公司数量"] = pd.to_numeric(temp_df["公司数量"], errors="coerce")
temp_df["纳入计算公司数量"] = pd.to_numeric(temp_df["纳入计算公司数量"], errors="coerce")
temp_df["总市值-静态"] = pd.to_numeric(temp_df["总市值-静态"], errors="coerce")
temp_df["净利润-静态"] = pd.to_numeric(temp_df["净利润-静态"], errors="coerce")
temp_df["静态市盈率-加权平均"] = pd.to_numeric(temp_df["静态市盈率-加权平均"], errors="coerce")
temp_df["静态市盈率-中位数"] = pd.to_numeric(temp_df["静态市盈率-中位数"], errors="coerce")
temp_df["静态市盈率-算术平均"] = pd.to_numeric(temp_df["静态市盈率-算术平均"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_industry_pe_cninfo.py#L45-L122
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 14.102564 |
[
11,
15,
16,
19,
20,
21,
22,
23,
38,
39,
40,
41,
55,
69,
70,
71,
72,
73,
74,
75,
76,
77
] | 28.205128 | false | 25 | 78 | 1 | 71.794872 | 8 |
def stock_industry_pe_ratio_cninfo(symbol: str = "证监会行业分类", date: str = "20210910") -> pd.DataFrame:
sort_code_map = {
"证监会行业分类": "008001",
"国证行业分类": "008200"
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1087"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]]),
"sortcode": sort_code_map[symbol],
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"行业层级",
"静态市盈率-算术平均",
"静态市盈率-中位数",
"静态市盈率-加权平均",
"净利润-静态",
"行业名称",
"行业编码",
"行业分类",
"总市值-静态",
"纳入计算公司数量",
"变动日期",
"公司数量",
]
temp_df = temp_df[[
"变动日期",
"行业分类",
"行业层级",
"行业编码",
"行业名称",
"公司数量",
"纳入计算公司数量",
"总市值-静态",
"净利润-静态",
"静态市盈率-加权平均",
"静态市盈率-中位数",
"静态市盈率-算术平均",
]]
temp_df["行业层级"] = pd.to_numeric(temp_df["行业层级"], errors="coerce")
temp_df["公司数量"] = pd.to_numeric(temp_df["公司数量"], errors="coerce")
temp_df["纳入计算公司数量"] = pd.to_numeric(temp_df["纳入计算公司数量"], errors="coerce")
temp_df["总市值-静态"] = pd.to_numeric(temp_df["总市值-静态"], errors="coerce")
temp_df["净利润-静态"] = pd.to_numeric(temp_df["净利润-静态"], errors="coerce")
temp_df["静态市盈率-加权平均"] = pd.to_numeric(temp_df["静态市盈率-加权平均"], errors="coerce")
temp_df["静态市盈率-中位数"] = pd.to_numeric(temp_df["静态市盈率-中位数"], errors="coerce")
temp_df["静态市盈率-算术平均"] = pd.to_numeric(temp_df["静态市盈率-算术平均"], errors="coerce")
return temp_df
| 18,774 |
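
Usage sketch for the record above: a minimal, hedged call of stock_industry_pe_ratio_cninfo with the two parameters shown in its signature. The import path is taken from the record's URL and may differ in other akshare versions; network access to webapi.cninfo.com.cn is required.

# Minimal usage sketch, assuming akshare (and its py_mini_racer dependency) is installed
# and the cninfo endpoint still responds.
from akshare.stock.stock_industry_pe_cninfo import stock_industry_pe_ratio_cninfo

pe_df = stock_industry_pe_ratio_cninfo(symbol="证监会行业分类", date="20210910")
# Columns follow the selection in the record, e.g. 行业名称 and the three 静态市盈率 aggregates.
print(pe_df[["行业名称", "静态市盈率-加权平均", "静态市盈率-中位数"]].head())
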
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_rank_em.py
|
stock_hot_rank_em
|
()
|
return temp_df
|
东方财富-个股人气榜-人气榜
https://guba.eastmoney.com/rank/
:return: 人气榜
:rtype: pandas.DataFrame
|
东方财富-个股人气榜-人气榜
https://guba.eastmoney.com/rank/
:return: 人气榜
:rtype: pandas.DataFrame
| 12 | 61 |
def stock_hot_rank_em() -> pd.DataFrame:
"""
东方财富-个股人气榜-人气榜
https://guba.eastmoney.com/rank/
:return: 人气榜
:rtype: pandas.DataFrame
"""
url = "https://emappdata.eastmoney.com/stockrank/getAllCurrentList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"pageNo": 1,
"pageSize": 100,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_rank_df = pd.DataFrame(data_json["data"])
temp_rank_df["mark"] = [
"0" + "." + item[2:] if "SZ" in item else "1" + "." + item[2:]
for item in temp_rank_df["sc"]
]
",".join(temp_rank_df["mark"]) + "?v=08926209912590994"
params = {
"ut": "f057cbcbce2a86e2866ab8877db1d059",
"fltt": "2",
"invt": "2",
"fields": "f14,f3,f12,f2",
"secids": ",".join(temp_rank_df["mark"]) + ",?v=08926209912590994",
}
url = "https://push2.eastmoney.com/api/qt/ulist.np/get"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = ["最新价", "涨跌幅", "代码", "股票名称"]
temp_df["当前排名"] = temp_rank_df["rk"]
temp_df["代码"] = temp_rank_df["sc"]
temp_df = temp_df[
[
"当前排名",
"代码",
"股票名称",
"最新价",
"涨跌幅",
]
]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_rank_em.py#L12-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14 |
[
7,
8,
15,
16,
17,
19,
23,
24,
31,
32,
33,
34,
35,
36,
37,
38,
47,
48,
49
] | 38 | false | 11.363636 | 50 | 2 | 62 | 4 |
def stock_hot_rank_em() -> pd.DataFrame:
url = "https://emappdata.eastmoney.com/stockrank/getAllCurrentList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"pageNo": 1,
"pageSize": 100,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_rank_df = pd.DataFrame(data_json["data"])
temp_rank_df["mark"] = [
"0" + "." + item[2:] if "SZ" in item else "1" + "." + item[2:]
for item in temp_rank_df["sc"]
]
",".join(temp_rank_df["mark"]) + "?v=08926209912590994"
params = {
"ut": "f057cbcbce2a86e2866ab8877db1d059",
"fltt": "2",
"invt": "2",
"fields": "f14,f3,f12,f2",
"secids": ",".join(temp_rank_df["mark"]) + ",?v=08926209912590994",
}
url = "https://push2.eastmoney.com/api/qt/ulist.np/get"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = ["最新价", "涨跌幅", "代码", "股票名称"]
temp_df["当前排名"] = temp_rank_df["rk"]
temp_df["代码"] = temp_rank_df["sc"]
temp_df = temp_df[
[
"当前排名",
"代码",
"股票名称",
"最新价",
"涨跌幅",
]
]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
return temp_df
| 18,775 |
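
A minimal call of the popularity-ranking function above; it takes no arguments and returns the top-100 list (a sketch, assuming akshare is installed and both EastMoney endpoints respond).

from akshare.stock.stock_hot_rank_em import stock_hot_rank_em

hot_df = stock_hot_rank_em()
# Expected columns after the renaming in the record: 当前排名, 代码, 股票名称, 最新价, 涨跌幅.
print(hot_df.head(10))
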
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_rank_em.py
|
stock_hot_rank_detail_em
|
(symbol: str = "SZ000665")
|
return temp_df
|
东方财富-个股人气榜-历史趋势及粉丝特征
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 个股的历史趋势及粉丝特征
:rtype: pandas.DataFrame
|
东方财富-个股人气榜-历史趋势及粉丝特征
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 个股的历史趋势及粉丝特征
:rtype: pandas.DataFrame
| 64 | 96 |
def stock_hot_rank_detail_em(symbol: str = "SZ000665") -> pd.DataFrame:
"""
东方财富-个股人气榜-历史趋势及粉丝特征
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 个股的历史趋势及粉丝特征
:rtype: pandas.DataFrame
"""
url_rank = "https://emappdata.eastmoney.com/stockrank/getHisList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"srcSecurityCode": symbol,
}
r = requests.post(url_rank, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["证券代码"] = symbol
temp_df.columns = ["时间", "排名", "证券代码"]
temp_df = temp_df[["时间", "排名", "证券代码"]]
url_follow = "https://emappdata.eastmoney.com/stockrank/getHisProfileList"
r = requests.post(url_follow, json=payload)
data_json = r.json()
temp_df["新晋粉丝"] = (
pd.DataFrame(data_json["data"])["newUidRate"].str.strip("%").astype(float) / 100
)
temp_df["铁杆粉丝"] = (
pd.DataFrame(data_json["data"])["oldUidRate"].str.strip("%").astype(float) / 100
)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_rank_em.py#L64-L96
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 27.272727 |
[
9,
10,
16,
17,
18,
19,
20,
21,
23,
24,
25,
26,
29,
32
] | 42.424242 | false | 11.363636 | 33 | 1 | 57.575758 | 6 |
def stock_hot_rank_detail_em(symbol: str = "SZ000665") -> pd.DataFrame:
url_rank = "https://emappdata.eastmoney.com/stockrank/getHisList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"srcSecurityCode": symbol,
}
r = requests.post(url_rank, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["证券代码"] = symbol
temp_df.columns = ["时间", "排名", "证券代码"]
temp_df = temp_df[["时间", "排名", "证券代码"]]
url_follow = "https://emappdata.eastmoney.com/stockrank/getHisProfileList"
r = requests.post(url_follow, json=payload)
data_json = r.json()
temp_df["新晋粉丝"] = (
pd.DataFrame(data_json["data"])["newUidRate"].str.strip("%").astype(float) / 100
)
temp_df["铁杆粉丝"] = (
pd.DataFrame(data_json["data"])["oldUidRate"].str.strip("%").astype(float) / 100
)
return temp_df
| 18,776 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_rank_em.py
|
stock_hot_rank_detail_realtime_em
|
(symbol: str = "SZ000665")
|
return temp_df
|
东方财富-个股人气榜-实时变动
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 实时变动
:rtype: pandas.DataFrame
|
东方财富-个股人气榜-实时变动
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 实时变动
:rtype: pandas.DataFrame
| 99 | 119 |
def stock_hot_rank_detail_realtime_em(symbol: str = "SZ000665") -> pd.DataFrame:
"""
东方财富-个股人气榜-实时变动
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 实时变动
:rtype: pandas.DataFrame
"""
url = "https://emappdata.eastmoney.com/stockrank/getCurrentList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df.columns = ['时间', '排名']
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_rank_em.py#L99-L119
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 42.857143 |
[
9,
10,
16,
17,
18,
19,
20
] | 33.333333 | false | 11.363636 | 21 | 1 | 66.666667 | 6 |
def stock_hot_rank_detail_realtime_em(symbol: str = "SZ000665") -> pd.DataFrame:
url = "https://emappdata.eastmoney.com/stockrank/getCurrentList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
temp_df.columns = ['时间', '排名']
return temp_df
| 18,777 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_rank_em.py
|
stock_hot_keyword_em
|
(symbol: str = "SZ000665")
|
return temp_df
|
东方财富-个股人气榜-关键词
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 关键词
:rtype: pandas.DataFrame
|
东方财富-个股人气榜-关键词
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 关键词
:rtype: pandas.DataFrame
| 122 | 142 |
def stock_hot_keyword_em(symbol: str = "SZ000665") -> pd.DataFrame:
"""
东方财富-个股人气榜-关键词
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 关键词
:rtype: pandas.DataFrame
"""
url = "https://emappdata.eastmoney.com/stockrank/getHotStockRankList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
del temp_df['flag']
temp_df.columns = ['时间', '股票代码', '概念名称', '概念代码', '热度']
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_rank_em.py#L122-L142
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 42.857143 |
[
9,
10,
15,
16,
17,
18,
19,
20
] | 38.095238 | false | 11.363636 | 21 | 1 | 61.904762 | 6 |
def stock_hot_keyword_em(symbol: str = "SZ000665") -> pd.DataFrame:
url = "https://emappdata.eastmoney.com/stockrank/getHotStockRankList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
del temp_df['flag']
temp_df.columns = ['时间', '股票代码', '概念名称', '概念代码', '热度']
return temp_df
| 18,778 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_rank_em.py
|
stock_hot_rank_relate_em
|
(symbol: str = "SZ000665")
|
return temp_df
|
东方财富-个股人气榜-相关股票
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 相关股票
:rtype: pandas.DataFrame
|
东方财富-个股人气榜-相关股票
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 相关股票
:rtype: pandas.DataFrame
| 169 | 191 |
def stock_hot_rank_relate_em(symbol: str = "SZ000665") -> pd.DataFrame:
"""
东方财富-个股人气榜-相关股票
https://guba.eastmoney.com/rank/stock?code=000665
:param symbol: 带市场表示的证券代码
:type symbol: str
:return: 相关股票
:rtype: pandas.DataFrame
"""
url = "https://emappdata.eastmoney.com/stockrank/getFollowStockRankList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame.from_dict(data_json['data'])
temp_df.columns = ['时间', '-', '股票代码', '-', '相关股票代码', '涨跌幅', '-']
temp_df = temp_df[['时间', '股票代码', '相关股票代码', '涨跌幅']]
temp_df['涨跌幅'] = temp_df['涨跌幅'].str.strip('%')
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_rank_em.py#L169-L191
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 39.130435 |
[
9,
10,
15,
16,
17,
18,
19,
20,
21,
22
] | 43.478261 | false | 11.363636 | 23 | 1 | 56.521739 | 6 |
def stock_hot_rank_relate_em(symbol: str = "SZ000665") -> pd.DataFrame:
url = "https://emappdata.eastmoney.com/stockrank/getFollowStockRankList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"srcSecurityCode": symbol,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame.from_dict(data_json['data'])
temp_df.columns = ['时间', '-', '股票代码', '-', '相关股票代码', '涨跌幅', '-']
temp_df = temp_df[['时间', '股票代码', '相关股票代码', '涨跌幅']]
temp_df['涨跌幅'] = temp_df['涨跌幅'].str.strip('%')
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
return temp_df
| 18,779 |
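
The four per-stock endpoints above (historical rank, realtime rank, keywords, related stocks) share the same appId/globalId payload and all take an exchange-prefixed code such as "SZ000665". A combined sketch, assuming akshare is installed:

from akshare.stock.stock_hot_rank_em import (
    stock_hot_rank_detail_em,
    stock_hot_rank_detail_realtime_em,
    stock_hot_keyword_em,
    stock_hot_rank_relate_em,
)

code = "SZ000665"
print(stock_hot_rank_detail_em(code).tail())           # 时间, 排名, 证券代码, 新晋粉丝, 铁杆粉丝
print(stock_hot_rank_detail_realtime_em(code).tail())  # 时间, 排名
print(stock_hot_keyword_em(code).head())               # 时间, 股票代码, 概念名称, 概念代码, 热度
print(stock_hot_rank_relate_em(code).head())           # 时间, 股票代码, 相关股票代码, 涨跌幅
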
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_ah_tx.py
|
_get_zh_stock_ah_page_count
|
()
|
return page_count
|
腾讯财经-港股-AH-总页数
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 总页数
:rtype: int
|
腾讯财经-港股-AH-总页数
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 总页数
:rtype: int
| 24 | 38 |
def _get_zh_stock_ah_page_count() -> int:
"""
腾讯财经-港股-AH-总页数
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 总页数
:rtype: int
"""
hk_payload_copy = hk_payload.copy()
hk_payload_copy.update({"reqPage": 1})
res = requests.get(hk_url, params=hk_payload_copy, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
page_count = data_json["data"]["page_count"]
return page_count
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_ah_tx.py#L24-L38
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 46.666667 |
[
7,
8,
9,
10,
13,
14
] | 40 | false | 13.186813 | 15 | 1 | 60 | 4 |
def _get_zh_stock_ah_page_count() -> int:
hk_payload_copy = hk_payload.copy()
hk_payload_copy.update({"reqPage": 1})
res = requests.get(hk_url, params=hk_payload_copy, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
page_count = data_json["data"]["page_count"]
return page_count
| 18,780 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_ah_tx.py
|
stock_zh_ah_spot
|
()
|
return big_df
|
腾讯财经-港股-AH-实时行情
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 腾讯财经-港股-AH-实时行情
:rtype: pandas.DataFrame
|
腾讯财经-港股-AH-实时行情
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 腾讯财经-港股-AH-实时行情
:rtype: pandas.DataFrame
| 41 | 109 |
def stock_zh_ah_spot() -> pd.DataFrame:
"""
腾讯财经-港股-AH-实时行情
https://stockapp.finance.qq.com/mstats/#mod=list&id=hk_ah&module=HK&type=AH&sort=3&page=3&max=20
:return: 腾讯财经-港股-AH-实时行情
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_stock_ah_page_count() + 1
for i in tqdm(range(1, page_count), leave=False):
hk_payload.update({"reqPage": i})
res = requests.get(hk_url, params=hk_payload, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
big_df = pd.concat(
[
big_df,
pd.DataFrame(data_json["data"]["page_data"])
.iloc[:, 0]
.str.split("~", expand=True),
],
ignore_index=True,
)
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
"-",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
]
]
big_df["最新价"] = pd.to_numeric(big_df["最新价"], errors="coerce")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"], errors="coerce")
big_df["涨跌额"] = pd.to_numeric(big_df["涨跌额"], errors="coerce")
big_df["买入"] = pd.to_numeric(big_df["买入"], errors="coerce")
big_df["卖出"] = pd.to_numeric(big_df["卖出"], errors="coerce")
big_df["成交量"] = pd.to_numeric(big_df["成交量"], errors="coerce")
big_df["成交额"] = pd.to_numeric(big_df["成交额"], errors="coerce")
big_df["今开"] = pd.to_numeric(big_df["今开"], errors="coerce")
big_df["昨收"] = pd.to_numeric(big_df["昨收"], errors="coerce")
big_df["最高"] = pd.to_numeric(big_df["最高"], errors="coerce")
big_df["最低"] = pd.to_numeric(big_df["最低"], errors="coerce")
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_ah_tx.py#L41-L109
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 10.144928 |
[
7,
8,
9,
10,
11,
12,
15,
24,
40,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68
] | 30.434783 | false | 13.186813 | 69 | 2 | 69.565217 | 4 |
def stock_zh_ah_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = _get_zh_stock_ah_page_count() + 1
for i in tqdm(range(1, page_count), leave=False):
hk_payload.update({"reqPage": i})
res = requests.get(hk_url, params=hk_payload, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
big_df = pd.concat(
[
big_df,
pd.DataFrame(data_json["data"]["page_data"])
.iloc[:, 0]
.str.split("~", expand=True),
],
ignore_index=True,
)
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
"-",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
]
]
big_df["最新价"] = pd.to_numeric(big_df["最新价"], errors="coerce")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"], errors="coerce")
big_df["涨跌额"] = pd.to_numeric(big_df["涨跌额"], errors="coerce")
big_df["买入"] = pd.to_numeric(big_df["买入"], errors="coerce")
big_df["卖出"] = pd.to_numeric(big_df["卖出"], errors="coerce")
big_df["成交量"] = pd.to_numeric(big_df["成交量"], errors="coerce")
big_df["成交额"] = pd.to_numeric(big_df["成交额"], errors="coerce")
big_df["今开"] = pd.to_numeric(big_df["今开"], errors="coerce")
big_df["昨收"] = pd.to_numeric(big_df["昨收"], errors="coerce")
big_df["最高"] = pd.to_numeric(big_df["最高"], errors="coerce")
big_df["最低"] = pd.to_numeric(big_df["最低"], errors="coerce")
return big_df
| 18,781 |
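
A minimal call of the AH spot function above; it pages through every page reported by _get_zh_stock_ah_page_count from the previous record, so it issues several requests (a sketch, assuming akshare and its demjson/tqdm dependencies are installed).

from akshare.stock.stock_zh_ah_tx import stock_zh_ah_spot

ah_df = stock_zh_ah_spot()
print(ah_df[["代码", "名称", "最新价", "涨跌幅"]].head())
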
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_ah_tx.py
|
stock_zh_ah_name
|
()
|
return big_df
|
腾讯财经-港股-AH-股票名称
:return: 股票代码和股票名称的字典
:rtype: pandas.DataFrame
|
腾讯财经-港股-AH-股票名称
:return: 股票代码和股票名称的字典
:rtype: pandas.DataFrame
| 112 | 156 |
def stock_zh_ah_name() -> pd.DataFrame:
"""
腾讯财经-港股-AH-股票名称
:return: 股票代码和股票名称的字典
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_stock_ah_page_count() + 1
for i in tqdm(range(1, page_count), leave=False):
hk_payload.update({"reqPage": i})
res = requests.get(hk_url, params=hk_payload, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
big_df = pd.concat(
[
big_df,
pd.DataFrame(data_json["data"]["page_data"])
.iloc[:, 0]
.str.split("~", expand=True),
],
ignore_index=True,
).iloc[:, :-1]
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
]
big_df = big_df[
[
"代码",
"名称",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_ah_tx.py#L112-L156
| 25 |
[
0,
1,
2,
3,
4,
5
] | 13.333333 |
[
6,
7,
8,
9,
10,
11,
14,
23,
38,
44
] | 22.222222 | false | 13.186813 | 45 | 2 | 77.777778 | 3 |
def stock_zh_ah_name() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = _get_zh_stock_ah_page_count() + 1
for i in tqdm(range(1, page_count), leave=False):
hk_payload.update({"reqPage": i})
res = requests.get(hk_url, params=hk_payload, headers=hk_headers)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
big_df = pd.concat(
[
big_df,
pd.DataFrame(data_json["data"]["page_data"])
.iloc[:, 0]
.str.split("~", expand=True),
],
ignore_index=True,
).iloc[:, :-1]
big_df.columns = [
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"买入",
"卖出",
"成交量",
"成交额",
"今开",
"昨收",
"最高",
"最低",
]
big_df = big_df[
[
"代码",
"名称",
]
]
return big_df
| 18,782 |
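
stock_zh_ah_name above returns a two-column DataFrame (代码, 名称) rather than a literal dict, which is why the return annotation is corrected to pd.DataFrame. A sketch that also builds the code-to-name mapping, assuming akshare is installed:

from akshare.stock.stock_zh_ah_tx import stock_zh_ah_name

name_df = stock_zh_ah_name()
code_to_name = dict(zip(name_df["代码"], name_df["名称"]))  # code -> name lookup
print(len(code_to_name))
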
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_ah_tx.py
|
stock_zh_ah_daily
|
(
symbol: str = "02318",
start_year: str = "2000",
end_year: str = "2019",
adjust: str = "",
)
|
return big_df
|
腾讯财经-港股-AH-股票历史行情
http://gu.qq.com/hk01033/gp
:param symbol: 股票代码
:type symbol: str
:param start_year: 开始年份; e.g., “2000”
:type start_year: str
:param end_year: 结束年份; e.g., “2019”
:type end_year: str
:param adjust: 'qfq': 前复权, 'hfq': 后复权
:type adjust: str
:return: 指定股票在指定年份的日频率历史行情数据
:rtype: pandas.DataFrame
|
腾讯财经-港股-AH-股票历史行情
http://gu.qq.com/hk01033/gp
:param symbol: 股票代码
:type symbol: str
:param start_year: 开始年份; e.g., “2000”
:type start_year: str
:param end_year: 结束年份; e.g., “2019”
:type end_year: str
:param adjust: 'qfq': 前复权, 'hfq': 后复权
:type adjust: str
:return: 指定股票在指定年份的日频率历史行情数据
:rtype: pandas.DataFrame
| 159 | 258 |
def stock_zh_ah_daily(
symbol: str = "02318",
start_year: str = "2000",
end_year: str = "2019",
adjust: str = "",
) -> pd.DataFrame:
"""
腾讯财经-港股-AH-股票历史行情
http://gu.qq.com/hk01033/gp
:param symbol: 股票代码
:type symbol: str
:param start_year: 开始年份; e.g., “2000”
:type start_year: str
:param end_year: 结束年份; e.g., “2019”
:type end_year: str
:param adjust: 'qfq': 前复权, 'hfq': 后复权
:type adjust: str
:return: 指定股票在指定年份的日频率历史行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
for year in tqdm(range(int(start_year), int(end_year)), leave=False):
# year = "2003"
hk_stock_payload_copy = hk_stock_payload.copy()
hk_stock_payload_copy.update({"_var": f"kline_day{adjust}{year}"})
if adjust == "":
hk_stock_payload_copy.update(
{
"param": f"hk{symbol},day,{year}-01-01,{int(year) + 1}-12-31,640,"
}
)
else:
hk_stock_payload_copy.update(
{
"param": f"hk{symbol},day,{year}-01-01,{int(year) + 1}-12-31,640,{adjust}"
}
)
hk_stock_payload_copy.update({"r": str(random.random())})
if adjust == "":
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "web.ifzq.gtimg.cn",
"Pragma": "no-cache",
"Referer": "http://gu.qq.com/hk01033/gp",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
}
res = requests.get(
"http://web.ifzq.gtimg.cn/appstock/app/kline/kline",
params=hk_stock_payload_copy,
headers=headers,
)
else:
res = requests.get(
"http://web.ifzq.gtimg.cn/appstock/app/hkfqkline/get",
params=hk_stock_payload_copy,
headers=hk_stock_headers,
)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
try:
if adjust == "":
temp_df = pd.DataFrame(data_json["data"][f"hk{symbol}"]["day"])
else:
temp_df = pd.DataFrame(
data_json["data"][f"hk{symbol}"][f"{adjust}day"]
)
except:
continue
if adjust != "" and not temp_df.empty:
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"_",
"_",
"_",
]
temp_df = temp_df[["日期", "开盘", "收盘", "最高", "最低", "成交量"]]
elif not temp_df.empty:
try:
temp_df.columns = ["日期", "开盘", "收盘", "最高", "最低", "成交量", "_"]
except:
temp_df.columns = ["日期", "开盘", "收盘", "最高", "最低", "成交量"]
temp_df = temp_df[["日期", "开盘", "收盘", "最高", "最低", "成交量"]]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
big_df["开盘"] = pd.to_numeric(big_df["开盘"])
big_df["收盘"] = pd.to_numeric(big_df["收盘"])
big_df["最高"] = pd.to_numeric(big_df["最高"])
big_df["最低"] = pd.to_numeric(big_df["最低"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_ah_tx.py#L159-L258
| 25 |
[
0
] | 1 |
[
20,
21,
23,
24,
25,
26,
32,
37,
38,
39,
50,
56,
61,
64,
65,
66,
68,
71,
72,
73,
74,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99
] | 36 | false | 13.186813 | 100 | 10 | 64 | 12 |
def stock_zh_ah_daily(
symbol: str = "02318",
start_year: str = "2000",
end_year: str = "2019",
adjust: str = "",
) -> pd.DataFrame:
big_df = pd.DataFrame()
for year in tqdm(range(int(start_year), int(end_year)), leave=False):
# year = "2003"
hk_stock_payload_copy = hk_stock_payload.copy()
hk_stock_payload_copy.update({"_var": f"kline_day{adjust}{year}"})
if adjust == "":
hk_stock_payload_copy.update(
{
"param": f"hk{symbol},day,{year}-01-01,{int(year) + 1}-12-31,640,"
}
)
else:
hk_stock_payload_copy.update(
{
"param": f"hk{symbol},day,{year}-01-01,{int(year) + 1}-12-31,640,{adjust}"
}
)
hk_stock_payload_copy.update({"r": str(random.random())})
if adjust == "":
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "web.ifzq.gtimg.cn",
"Pragma": "no-cache",
"Referer": "http://gu.qq.com/hk01033/gp",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
}
res = requests.get(
"http://web.ifzq.gtimg.cn/appstock/app/kline/kline",
params=hk_stock_payload_copy,
headers=headers,
)
else:
res = requests.get(
"http://web.ifzq.gtimg.cn/appstock/app/hkfqkline/get",
params=hk_stock_payload_copy,
headers=hk_stock_headers,
)
data_json = demjson.decode(
res.text[res.text.find("{") : res.text.rfind("}") + 1]
)
try:
if adjust == "":
temp_df = pd.DataFrame(data_json["data"][f"hk{symbol}"]["day"])
else:
temp_df = pd.DataFrame(
data_json["data"][f"hk{symbol}"][f"{adjust}day"]
)
except:
continue
if adjust != "" and not temp_df.empty:
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"_",
"_",
"_",
]
temp_df = temp_df[["日期", "开盘", "收盘", "最高", "最低", "成交量"]]
elif not temp_df.empty:
try:
temp_df.columns = ["日期", "开盘", "收盘", "最高", "最低", "成交量", "_"]
except:
temp_df.columns = ["日期", "开盘", "收盘", "最高", "最低", "成交量"]
temp_df = temp_df[["日期", "开盘", "收盘", "最高", "最低", "成交量"]]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
big_df["开盘"] = pd.to_numeric(big_df["开盘"])
big_df["收盘"] = pd.to_numeric(big_df["收盘"])
big_df["最高"] = pd.to_numeric(big_df["最高"])
big_df["最低"] = pd.to_numeric(big_df["最低"])
big_df["成交量"] = pd.to_numeric(big_df["成交量"])
return big_df
| 18,783 |
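
A hedged call of the daily-history function above; adjust accepts '' (raw), 'qfq' or 'hfq' as documented, and each year in the requested range triggers one request.

from akshare.stock.stock_zh_ah_tx import stock_zh_ah_daily

daily_df = stock_zh_ah_daily(symbol="02318", start_year="2015", end_year="2019", adjust="hfq")
print(daily_df.head())   # 日期, 开盘, 收盘, 最高, 最低, 成交量
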
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_zh_hx.py
|
stock_us_zh_spot
|
()
|
return temp_df
|
美股-中概股的实时行情数据
http://quote.hexun.com/default.htm#ustock_3
:return: 中概股的实时行情数据
:rtype: pandas.DataFrame
|
美股-中概股的实时行情数据
http://quote.hexun.com/default.htm#ustock_3
:return: 中概股的实时行情数据
:rtype: pandas.DataFrame
| 15 | 64 |
def stock_us_zh_spot() -> pd.DataFrame:
"""
美股-中概股的实时行情数据
http://quote.hexun.com/default.htm#ustock_3
:return: 中概股的实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://quote.hexun.com/usastock/data/getdjstock.aspx"
params = {
"type": "1",
"market": "3",
"sorttype": "4",
"updown": "up",
"page": "1",
'count': "200",
"time": "203450"
}
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "quote.hexun.com",
"Pragma": "no-cache",
"Referer": "http://quote.hexun.com/usastock/xqstock.aspx?market=3",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
}
r = requests.get(url, params=params, headers=headers)
data_list = eval(r.text.split("=")[1].strip().rsplit(";")[0]) # eval 出列表
data_df = pd.DataFrame(
data_list,
columns=[
"代码",
"名称",
"最新价(美元)",
"涨跌幅",
"d_1",
"d_2",
"最高",
"最低",
"昨收",
"d_3",
"成交量",
"d_4",
],
)
temp_df = data_df[["代码", "名称", "最新价(美元)", "涨跌幅", "最高", "最低", "昨收", "成交量"]]
temp_df = temp_df.rename({"最新价(美元)": "最新价"}, axis=1)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_zh_hx.py#L15-L64
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14 |
[
7,
8,
17,
28,
29,
30,
47,
48,
49
] | 18 | false | 20 | 50 | 1 | 82 | 4 |
def stock_us_zh_spot() -> pd.DataFrame:
url = "http://quote.hexun.com/usastock/data/getdjstock.aspx"
params = {
"type": "1",
"market": "3",
"sorttype": "4",
"updown": "up",
"page": "1",
'count': "200",
"time": "203450"
}
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "quote.hexun.com",
"Pragma": "no-cache",
"Referer": "http://quote.hexun.com/usastock/xqstock.aspx?market=3",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
}
r = requests.get(url, params=params, headers=headers)
data_list = eval(r.text.split("=")[1].strip().rsplit(";")[0]) # eval 出列表
data_df = pd.DataFrame(
data_list,
columns=[
"代码",
"名称",
"最新价(美元)",
"涨跌幅",
"d_1",
"d_2",
"最高",
"最低",
"昨收",
"d_3",
"成交量",
"d_4",
],
)
temp_df = data_df[["代码", "名称", "最新价(美元)", "涨跌幅", "最高", "最低", "昨收", "成交量"]]
temp_df = temp_df.rename({"最新价(美元)": "最新价"}, axis=1)
return temp_df
| 18,784 |
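
A minimal call of the Hexun US-listed China-concept spot function above. Note that the function parses the response body with eval(); ast.literal_eval from the standard library would be a safer drop-in for a plain list literal, but the sketch below simply calls the function as written (assuming akshare is installed and the Hexun endpoint still responds).

from akshare.stock.stock_us_zh_hx import stock_us_zh_spot

us_spot_df = stock_us_zh_spot()
print(us_spot_df[["代码", "名称", "最新价", "涨跌幅"]].head())
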
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_zh_hx.py
|
stock_us_zh_daily
|
(symbol: str = 'BABA')
|
return data_df
|
美股-中概股的日频率历史行情数据
http://stockdata.stock.hexun.com/us/BABA.shtml
:return: 中概股的日频率历史行情数据
:rtype: pandas.DataFrame
|
美股-中概股的日频率历史行情数据
http://stockdata.stock.hexun.com/us/BABA.shtml
:return: 中概股的日频率历史行情数据
:rtype: pandas.DataFrame
| 67 | 108 |
def stock_us_zh_daily(symbol: str = 'BABA') -> pd.DataFrame:
"""
美股-中概股的日频率历史行情数据
http://stockdata.stock.hexun.com/us/BABA.shtml
:return: 中概股的日频率历史行情数据
:rtype: pandas.DataFrame
"""
url = "http://webusstock.hermes.hexun.com/usa/kline"
params = {
"code": f"NYSE{symbol}",
"start": "20201218223000",
"number": "-1000",
"type": "5",
}
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Cookie": "UM_distinctid=1758d5cddd49c-0fc7e2c2612624-303464-1fa400-1758d5cddd592f; ADVC=38ffe1fbf97465; cn_1263247791_dplus=%7B%22distinct_id%22%3A%20%221758d5cddd49c-0fc7e2c2612624-303464-1fa400-1758d5cddd592f%22%2C%22userFirstDate%22%3A%20%2220201103%22%2C%22userID%22%3A%20%220%22%2C%22userName%22%3A%20%22%22%2C%22userType%22%3A%20%22loginuser%22%2C%22userLoginDate%22%3A%20%2220201103%22%2C%22%24_sessionid%22%3A%200%2C%22%24_sessionTime%22%3A%201604394498%2C%22%24dp%22%3A%200%2C%22%24_sessionPVTime%22%3A%201604394498%2C%22initial_view_time%22%3A%20%221604392649%22%2C%22initial_referrer%22%3A%20%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DRaFkqqESxpi2iDV4Q7Men69HaM9QOkW5KKUtQakjQzkfkygaOuGzJBFcHSg35wmfSKFA26xUDad7jHwCCv1ksa%26wd%3D%26eqid%3Da38e871500004846000000065fa11d78%22%2C%22initial_referrer_domain%22%3A%20%22www.baidu.com%22%2C%22%24recent_outside_referrer%22%3A%20%22www.baidu.com%22%7D; HexunTrack=SID=20200722150527074dded739ba5e24fd2915f1f692d4ad9c7&CITY=51&TOWN=510100; ADVS=392342a9ca40f5; ASL=18614,00rzr,abdfc0a27d461d18b68a5530; __jsluid_h=35f649169d5fb027d0e857b8ecd24d5b",
"Host": "webusstock.hermes.hexun.com",
"Pragma": "no-cache",
"Referer": "http://stockdata.stock.hexun.com/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"X-Requested-With": "ShockwaveFlash/33.0.0.432",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
data_dict = json.loads(r.text[1:-2])
data_df = pd.DataFrame(
data_dict["Data"][0],
columns=[list(item.values())[0] for item in data_dict["KLine"]],
)
data_df['时间'] = data_df['时间'].astype(str).str.slice(0, 8)
data_df['前收盘价'] = round(data_df['前收盘价'] / 100, 2)
data_df['开盘价'] = round(data_df['开盘价'] / 100, 2)
data_df['收盘价'] = round(data_df['收盘价'] / 100, 2)
data_df['最高价'] = round(data_df['最高价'] / 100, 2)
data_df['最低价'] = round(data_df['最低价'] / 100, 2)
del data_df['成交额']
return data_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_zh_hx.py#L67-L108
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 16.666667 |
[
7,
8,
14,
27,
28,
29,
30,
34,
35,
36,
37,
38,
39,
40,
41
] | 35.714286 | false | 20 | 42 | 2 | 64.285714 | 4 |
def stock_us_zh_daily(symbol: str = 'BABA') -> pd.DataFrame:
url = "http://webusstock.hermes.hexun.com/usa/kline"
params = {
"code": f"NYSE{symbol}",
"start": "20201218223000",
"number": "-1000",
"type": "5",
}
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Cookie": "UM_distinctid=1758d5cddd49c-0fc7e2c2612624-303464-1fa400-1758d5cddd592f; ADVC=38ffe1fbf97465; cn_1263247791_dplus=%7B%22distinct_id%22%3A%20%221758d5cddd49c-0fc7e2c2612624-303464-1fa400-1758d5cddd592f%22%2C%22userFirstDate%22%3A%20%2220201103%22%2C%22userID%22%3A%20%220%22%2C%22userName%22%3A%20%22%22%2C%22userType%22%3A%20%22loginuser%22%2C%22userLoginDate%22%3A%20%2220201103%22%2C%22%24_sessionid%22%3A%200%2C%22%24_sessionTime%22%3A%201604394498%2C%22%24dp%22%3A%200%2C%22%24_sessionPVTime%22%3A%201604394498%2C%22initial_view_time%22%3A%20%221604392649%22%2C%22initial_referrer%22%3A%20%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DRaFkqqESxpi2iDV4Q7Men69HaM9QOkW5KKUtQakjQzkfkygaOuGzJBFcHSg35wmfSKFA26xUDad7jHwCCv1ksa%26wd%3D%26eqid%3Da38e871500004846000000065fa11d78%22%2C%22initial_referrer_domain%22%3A%20%22www.baidu.com%22%2C%22%24recent_outside_referrer%22%3A%20%22www.baidu.com%22%7D; HexunTrack=SID=20200722150527074dded739ba5e24fd2915f1f692d4ad9c7&CITY=51&TOWN=510100; ADVS=392342a9ca40f5; ASL=18614,00rzr,abdfc0a27d461d18b68a5530; __jsluid_h=35f649169d5fb027d0e857b8ecd24d5b",
"Host": "webusstock.hermes.hexun.com",
"Pragma": "no-cache",
"Referer": "http://stockdata.stock.hexun.com/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"X-Requested-With": "ShockwaveFlash/33.0.0.432",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
data_dict = json.loads(r.text[1:-2])
data_df = pd.DataFrame(
data_dict["Data"][0],
columns=[list(item.values())[0] for item in data_dict["KLine"]],
)
data_df['时间'] = data_df['时间'].astype(str).str.slice(0, 8)
data_df['前收盘价'] = round(data_df['前收盘价'] / 100, 2)
data_df['开盘价'] = round(data_df['开盘价'] / 100, 2)
data_df['收盘价'] = round(data_df['收盘价'] / 100, 2)
data_df['最高价'] = round(data_df['最高价'] / 100, 2)
data_df['最低价'] = round(data_df['最低价'] / 100, 2)
del data_df['成交额']
return data_df
| 18,785 |
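
A minimal call of the daily-history counterpart above; the request hard-codes the NYSE prefix, so only NYSE-listed symbols such as the default 'BABA' are expected to work (a sketch, assuming akshare is installed).

from akshare.stock.stock_us_zh_hx import stock_us_zh_daily

baba_df = stock_us_zh_daily(symbol="BABA")
print(baba_df.head())   # 时间 plus the price columns rescaled by /100 in the record
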
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_special.py
|
stock_zh_a_st_em
|
()
|
return temp_df
|
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
|
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
| 17 | 106 |
def stock_zh_a_st_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 f:4,m:1 f:4',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_special.py#L17-L106
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 7.777778 |
[
7,
8,
21,
22,
23,
24,
25,
26,
59,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89
] | 23.333333 | false | 9.183673 | 90 | 1 | 76.666667 | 4 |
def stock_zh_a_st_em() -> pd.DataFrame:
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 f:4,m:1 f:4',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
| 18,786 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_special.py
|
stock_zh_a_new_em
|
()
|
return temp_df
|
东方财富网-行情中心-沪深个股-新股
http://quote.eastmoney.com/center/gridlist.html#newshares
:return: 新股
:rtype: pandas.DataFrame
|
东方财富网-行情中心-沪深个股-新股
http://quote.eastmoney.com/center/gridlist.html#newshares
:return: 新股
:rtype: pandas.DataFrame
| 109 | 198 |
def stock_zh_a_new_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-新股
http://quote.eastmoney.com/center/gridlist.html#newshares
:return: 新股
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f26',
'fs': 'm:0 f:8,m:1 f:8',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_special.py#L109-L198
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 7.777778 |
[
7,
8,
21,
22,
23,
24,
25,
26,
59,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89
] | 23.333333 | false | 9.183673 | 90 | 1 | 76.666667 | 4 |
def stock_zh_a_new_em() -> pd.DataFrame:
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f26',
'fs': 'm:0 f:8,m:1 f:8',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
| 18,787 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_special.py
|
stock_zh_a_stop_em
|
()
|
return temp_df
|
东方财富网-行情中心-沪深个股-两网及退市
http://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
|
东方财富网-行情中心-沪深个股-两网及退市
http://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
| 201 | 290 |
def stock_zh_a_stop_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-两网及退市
http://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 s:3',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_special.py#L201-L290
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 7.777778 |
[
7,
8,
21,
22,
23,
24,
25,
26,
59,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89
] | 23.333333 | false | 9.183673 | 90 | 1 | 76.666667 | 4 |
def stock_zh_a_stop_em() -> pd.DataFrame:
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 s:3',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
| 18,788 |
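
The three board lists above (风险警示板, 新股, 两网及退市) hit the same EastMoney clist endpoint and differ only in the fs filter; a combined sketch, assuming akshare is installed:

from akshare.stock.stock_zh_a_special import (
    stock_zh_a_st_em,
    stock_zh_a_new_em,
    stock_zh_a_stop_em,
)

print(stock_zh_a_st_em().head())    # fs = "m:0 f:4,m:1 f:4"
print(stock_zh_a_new_em().head())   # fs = "m:0 f:8,m:1 f:8"
print(stock_zh_a_stop_em().head())  # fs = "m:0 s:3"
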
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_special.py
|
stock_zh_a_new
|
()
|
return big_df
|
新浪财经-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
:return: 次新股行情数据
:rtype: pandas.DataFrame
|
新浪财经-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
:return: 次新股行情数据
:rtype: pandas.DataFrame
| 293 | 338 |
def stock_zh_a_new() -> pd.DataFrame:
"""
新浪财经-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
:return: 次新股行情数据
:rtype: pandas.DataFrame
"""
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount"
params = {"node": "new_stock"}
r = requests.get(url, params=params)
total_page = math.ceil(int(r.json()) / 80)
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
big_df = pd.DataFrame()
for page in range(1, total_page + 1):
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "new_stock",
"symbol": "",
"_s_r_a": "page",
}
r = requests.get(url, params=params)
r.encoding = "gb2312"
data_json = r.json()
temp_df = pd.DataFrame(data_json)
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[
[
"symbol",
"code",
"name",
"open",
"high",
"low",
"volume",
"amount",
"mktcap",
"turnoverratio",
]
]
big_df['open'] = pd.to_numeric(big_df['open'])
big_df['high'] = pd.to_numeric(big_df['high'])
big_df['low'] = pd.to_numeric(big_df['low'])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_special.py#L293-L338
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 15.217391 |
[
7,
8,
9,
10,
11,
12,
13,
14,
23,
24,
25,
26,
27,
28,
42,
43,
44,
45
] | 39.130435 | false | 9.183673 | 46 | 2 | 60.869565 | 4 |
def stock_zh_a_new() -> pd.DataFrame:
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount"
params = {"node": "new_stock"}
r = requests.get(url, params=params)
total_page = math.ceil(int(r.json()) / 80)
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
big_df = pd.DataFrame()
for page in range(1, total_page + 1):
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "new_stock",
"symbol": "",
"_s_r_a": "page",
}
r = requests.get(url, params=params)
r.encoding = "gb2312"
data_json = r.json()
temp_df = pd.DataFrame(data_json)
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[
[
"symbol",
"code",
"name",
"open",
"high",
"low",
"volume",
"amount",
"mktcap",
"turnoverratio",
]
]
big_df['open'] = pd.to_numeric(big_df['open'])
big_df['high'] = pd.to_numeric(big_df['high'])
big_df['low'] = pd.to_numeric(big_df['low'])
return big_df
| 18,789 |
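
A minimal call of the Sina sub-new-stock list above; it first asks for the total count, then pulls 80 rows per page (a sketch, assuming akshare is installed).

from akshare.stock.stock_zh_a_special import stock_zh_a_new

sub_new_df = stock_zh_a_new()
print(sub_new_df[["symbol", "name", "open", "high", "low"]].head())
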
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_sctj
|
()
|
return big_df
|
东方财富网-数据中心-大宗交易-市场统计
http://data.eastmoney.com/dzjy/dzjy_sctj.aspx
:return: 市场统计表
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-市场统计
http://data.eastmoney.com/dzjy/dzjy_sctj.aspx
:return: 市场统计表
:rtype: pandas.DataFrame
| 12 | 61 |
def stock_dzjy_sctj() -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-市场统计
http://data.eastmoney.com/dzjy/dzjy_sctj.aspx
:return: 市场统计表
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'TRADE_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'reportName': 'PRT_BLOCKTRADE_MARKET_STA',
'columns': 'TRADE_DATE,SZ_INDEX,SZ_CHANGE_RATE,BLOCKTRADE_DEAL_AMT,PREMIUM_DEAL_AMT,PREMIUM_RATIO,DISCOUNT_DEAL_AMT,DISCOUNT_RATIO',
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = int(data_json['result']["pages"])
big_df = pd.DataFrame()
for page in range(1, total_page+1):
params.update({'pageNumber': page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df['index'] + 1
big_df.columns = [
"序号",
"交易日期",
"上证指数",
"上证指数涨跌幅",
"大宗交易成交总额",
"溢价成交总额",
"溢价成交总额占比",
"折价成交总额",
"折价成交总额占比",
]
big_df["交易日期"] = pd.to_datetime(big_df["交易日期"]).dt.date
big_df["上证指数"] = pd.to_numeric(big_df["上证指数"])
big_df["上证指数涨跌幅"] = pd.to_numeric(big_df["上证指数涨跌幅"])
big_df["大宗交易成交总额"] = pd.to_numeric(big_df["大宗交易成交总额"])
big_df["溢价成交总额"] = pd.to_numeric(big_df["溢价成交总额"])
big_df["溢价成交总额占比"] = pd.to_numeric(big_df["溢价成交总额占比"])
big_df["折价成交总额"] = pd.to_numeric(big_df["折价成交总额"])
big_df["折价成交总额占比"] = pd.to_numeric(big_df["折价成交总额占比"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L12-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 14 |
[
7,
8,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
41,
42,
43,
44,
45,
46,
47,
48,
49
] | 48 | false | 5.586592 | 50 | 2 | 52 | 4 |
def stock_dzjy_sctj() -> pd.DataFrame:
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'TRADE_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'reportName': 'PRT_BLOCKTRADE_MARKET_STA',
'columns': 'TRADE_DATE,SZ_INDEX,SZ_CHANGE_RATE,BLOCKTRADE_DEAL_AMT,PREMIUM_DEAL_AMT,PREMIUM_RATIO,DISCOUNT_DEAL_AMT,DISCOUNT_RATIO',
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = int(data_json['result']["pages"])
big_df = pd.DataFrame()
for page in range(1, total_page+1):
params.update({'pageNumber': page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df['index'] + 1
big_df.columns = [
"序号",
"交易日期",
"上证指数",
"上证指数涨跌幅",
"大宗交易成交总额",
"溢价成交总额",
"溢价成交总额占比",
"折价成交总额",
"折价成交总额占比",
]
big_df["交易日期"] = pd.to_datetime(big_df["交易日期"]).dt.date
big_df["上证指数"] = pd.to_numeric(big_df["上证指数"])
big_df["上证指数涨跌幅"] = pd.to_numeric(big_df["上证指数涨跌幅"])
big_df["大宗交易成交总额"] = pd.to_numeric(big_df["大宗交易成交总额"])
big_df["溢价成交总额"] = pd.to_numeric(big_df["溢价成交总额"])
big_df["溢价成交总额占比"] = pd.to_numeric(big_df["溢价成交总额占比"])
big_df["折价成交总额"] = pd.to_numeric(big_df["折价成交总额"])
big_df["折价成交总额占比"] = pd.to_numeric(big_df["折价成交总额占比"])
return big_df
| 18,790 |
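
A minimal call of the block-trade market-statistics function above; it pages through the PRT_BLOCKTRADE_MARKET_STA report 500 rows at a time, newest trade dates first (a sketch, assuming akshare is installed).

from akshare.stock.stock_dzjy_em import stock_dzjy_sctj

sctj_df = stock_dzjy_sctj()
print(sctj_df[["交易日期", "大宗交易成交总额", "溢价成交总额占比"]].head())
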
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_mrmx
|
(symbol: str = '基金', start_date: str = '20220104', end_date: str = '20220104')
|
return temp_df
|
东方财富网-数据中心-大宗交易-每日明细
http://data.eastmoney.com/dzjy/dzjy_mrmxa.aspx
:param symbol: choice of {'A股', 'B股', '基金', '债券'}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日明细
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-每日明细
http://data.eastmoney.com/dzjy/dzjy_mrmxa.aspx
:param symbol: choice of {'A股', 'B股', '基金', '债券'}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日明细
:rtype: pandas.DataFrame
| 64 | 186 |
def stock_dzjy_mrmx(symbol: str = '基金', start_date: str = '20220104', end_date: str = '20220104') -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-每日明细
http://data.eastmoney.com/dzjy/dzjy_mrmxa.aspx
:param symbol: choice of {'A股', 'B股', '基金', '债券'}
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日明细
:rtype: pandas.DataFrame
"""
symbol_map = {
'A股': '1',
'B股': '2',
'基金': '3',
'债券': '4',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'SECURITY_CODE',
'sortTypes': '1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_DATA_BLOCKTRADE',
'columns': 'TRADE_DATE,SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CHANGE_RATE,CLOSE_PRICE,DEAL_PRICE,PREMIUM_RATIO,DEAL_VOLUME,DEAL_AMT,TURNOVER_RATE,BUYER_NAME,SELLER_NAME,CHANGE_RATE_1DAYS,CHANGE_RATE_5DAYS,CHANGE_RATE_10DAYS,CHANGE_RATE_20DAYS,BUYER_CODE,SELLER_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(SECURITY_TYPE_WEB={symbol_map[symbol]})(TRADE_DATE>='{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')(TRADE_DATE<='{'-'.join([end_date[:4], end_date[4:6], end_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json['result']["data"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json['result']["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
if symbol in {'A股'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交量",
"成交额",
"成交额/流通市值",
"买方营业部",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交量",
"成交额",
"成交额/流通市值",
"买方营业部",
"卖方营业部",
]]
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['收盘价'] = pd.to_numeric(temp_df['收盘价'])
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['折溢率'] = pd.to_numeric(temp_df['折溢率'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
temp_df['成交额/流通市值'] = pd.to_numeric(temp_df['成交额/流通市值'])
if symbol in {'B股', '基金', '债券'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"-",
"-",
"成交价",
"-",
"成交量",
"成交额",
"-",
"买方营业部",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"买方营业部",
"卖方营业部",
]]
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L64-L186
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 10.569106 |
[
13,
19,
20,
31,
32,
33,
34,
35,
36,
37,
38,
39,
61,
62,
77,
78,
79,
80,
81,
82,
83,
84,
85,
107,
108,
119,
120,
121,
122
] | 23.577236 | false | 5.586592 | 123 | 4 | 76.422764 | 10 |
def stock_dzjy_mrmx(symbol: str = '基金', start_date: str = '20220104', end_date: str = '20220104') -> pd.DataFrame:
symbol_map = {
'A股': '1',
'B股': '2',
'基金': '3',
'债券': '4',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'SECURITY_CODE',
'sortTypes': '1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_DATA_BLOCKTRADE',
'columns': 'TRADE_DATE,SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CHANGE_RATE,CLOSE_PRICE,DEAL_PRICE,PREMIUM_RATIO,DEAL_VOLUME,DEAL_AMT,TURNOVER_RATE,BUYER_NAME,SELLER_NAME,CHANGE_RATE_1DAYS,CHANGE_RATE_5DAYS,CHANGE_RATE_10DAYS,CHANGE_RATE_20DAYS,BUYER_CODE,SELLER_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(SECURITY_TYPE_WEB={symbol_map[symbol]})(TRADE_DATE>='{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')(TRADE_DATE<='{'-'.join([end_date[:4], end_date[4:6], end_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json['result']["data"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json['result']["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
if symbol in {'A股'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交量",
"成交额",
"成交额/流通市值",
"买方营业部",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交量",
"成交额",
"成交额/流通市值",
"买方营业部",
"卖方营业部",
]]
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['收盘价'] = pd.to_numeric(temp_df['收盘价'])
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['折溢率'] = pd.to_numeric(temp_df['折溢率'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
temp_df['成交额/流通市值'] = pd.to_numeric(temp_df['成交额/流通市值'])
if symbol in {'B股', '基金', '债券'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"-",
"-",
"成交价",
"-",
"成交量",
"成交额",
"-",
"买方营业部",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"买方营业部",
"卖方营业部",
]]
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
return temp_df
| 18,791 |
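A minimal usage sketch for the block-trade daily-detail function recorded above. The import path mirrors this record's path field, the parameter values echo the defaults in the signature, and it is assumed that the Eastmoney endpoint still serves data for that date:

from akshare.stock.stock_dzjy_em import stock_dzjy_mrmx

# 'A股' returns the extended column set (涨跌幅, 折溢率, ...); '基金' is the default
detail_df = stock_dzjy_mrmx(symbol='A股', start_date='20220104', end_date='20220104')
print(detail_df.head())
print(detail_df['成交额'].sum())  # total block-trade turnover for the day
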
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_mrtj
|
(start_date: str = '20220105', end_date: str = '20220105')
|
return temp_df
|
东方财富网-数据中心-大宗交易-每日统计
http://data.eastmoney.com/dzjy/dzjy_mrtj.aspx
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日统计
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-每日统计
http://data.eastmoney.com/dzjy/dzjy_mrtj.aspx
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日统计
:rtype: pandas.DataFrame
| 189 | 259 |
def stock_dzjy_mrtj(start_date: str = '20220105', end_date: str = '20220105') -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-每日统计
http://data.eastmoney.com/dzjy/dzjy_mrtj.aspx
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日统计
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'TURNOVERRATE',
'sortTypes': '-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_STA',
'columns': 'TRADE_DATE,SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CHANGE_RATE,CLOSE_PRICE,AVERAGE_PRICE,PREMIUM_RATIO,DEAL_NUM,VOLUME,DEAL_AMT,TURNOVERRATE,D1_CLOSE_ADJCHRATE,D5_CLOSE_ADJCHRATE,D10_CLOSE_ADJCHRATE,D20_CLOSE_ADJCHRATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"(TRADE_DATE>='{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')(TRADE_DATE<='{'-'.join([end_date[:4], end_date[4:6], end_date[6:]])}')"
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交笔数",
"成交总量",
"成交总额",
"成交总额/流通市值",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交笔数",
"成交总量",
"成交总额",
"成交总额/流通市值",
]]
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['收盘价'] = pd.to_numeric(temp_df['收盘价'])
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['折溢率'] = pd.to_numeric(temp_df['折溢率'])
temp_df['成交笔数'] = pd.to_numeric(temp_df['成交笔数'])
temp_df['成交总量'] = pd.to_numeric(temp_df['成交总量'])
temp_df['成交总额'] = pd.to_numeric(temp_df['成交总额'])
temp_df['成交总额/流通市值'] = pd.to_numeric(temp_df['成交总额/流通市值'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L189-L259
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 15.492958 |
[
11,
12,
23,
24,
25,
26,
27,
28,
47,
48,
62,
63,
64,
65,
66,
67,
68,
69,
70
] | 26.760563 | false | 5.586592 | 71 | 1 | 73.239437 | 8 |
def stock_dzjy_mrtj(start_date: str = '20220105', end_date: str = '20220105') -> pd.DataFrame:
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'TURNOVERRATE',
'sortTypes': '-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_STA',
'columns': 'TRADE_DATE,SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CHANGE_RATE,CLOSE_PRICE,AVERAGE_PRICE,PREMIUM_RATIO,DEAL_NUM,VOLUME,DEAL_AMT,TURNOVERRATE,D1_CLOSE_ADJCHRATE,D5_CLOSE_ADJCHRATE,D10_CLOSE_ADJCHRATE,D20_CLOSE_ADJCHRATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"(TRADE_DATE>='{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')(TRADE_DATE<='{'-'.join([end_date[:4], end_date[4:6], end_date[6:]])}')"
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"-",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交笔数",
"成交总量",
"成交总额",
"成交总额/流通市值",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交笔数",
"成交总量",
"成交总额",
"成交总额/流通市值",
]]
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['收盘价'] = pd.to_numeric(temp_df['收盘价'])
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['折溢率'] = pd.to_numeric(temp_df['折溢率'])
temp_df['成交笔数'] = pd.to_numeric(temp_df['成交笔数'])
temp_df['成交总量'] = pd.to_numeric(temp_df['成交总量'])
temp_df['成交总额'] = pd.to_numeric(temp_df['成交总额'])
temp_df['成交总额/流通市值'] = pd.to_numeric(temp_df['成交总额/流通市值'])
return temp_df
| 18,792 |
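A short, hedged example of the daily-statistics function above, again importing from the module named in the record's path field; the dates are illustrative only:

from akshare.stock.stock_dzjy_em import stock_dzjy_mrtj

stat_df = stock_dzjy_mrtj(start_date='20220105', end_date='20220105')
# deepest discounts first: 折溢率 is already numeric after the conversions above
print(stat_df.sort_values('折溢率').head(10))
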
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_hygtj
|
(symbol: str = '近三月') -> pd.DataFrame
|
return big_df
|
东方财富网-数据中心-大宗交易-活跃 A 股统计
http://data.eastmoney.com/dzjy/dzjy_hygtj.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 活跃 A 股统计
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-活跃 A 股统计
http://data.eastmoney.com/dzjy/dzjy_hygtj.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 活跃 A 股统计
:rtype: pandas.DataFrame
| 262 | 352 |
def stock_dzjy_hygtj(symbol: str = '近三月') -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-活跃 A 股统计
http://data.eastmoney.com/dzjy/dzjy_hygtj.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 活跃 A 股统计
:rtype: pandas.DataFrame
"""
period_map = {
'近一月': '1',
'近三月': '3',
'近六月': '6',
'近一年': '12',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'DEAL_NUM,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_ACSTA',
'columns': 'SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CLOSE_PRICE,CHANGE_RATE,TRADE_DATE,DEAL_AMT,PREMIUM_RATIO,SUM_TURNOVERRATE,DEAL_NUM,PREMIUM_TIMES,DISCOUNT_TIMES,D1_AVG_ADJCHRATE,D5_AVG_ADJCHRATE,D10_AVG_ADJCHRATE,D20_AVG_ADJCHRATE,DATE_TYPE_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(DATE_TYPE_CODE={period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"证券代码",
"_",
"证券简称",
"最新价",
"涨跌幅",
"最近上榜日",
"总成交额",
"折溢率",
"成交总额/流通市值",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
"_",
]
big_df = big_df[[
"序号",
"证券代码",
"证券简称",
"最新价",
"涨跌幅",
"最近上榜日",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"总成交额",
"折溢率",
"成交总额/流通市值",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
]]
big_df["最近上榜日"] = pd.to_datetime(big_df["最近上榜日"]).dt.date
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["上榜次数-总计"] = pd.to_numeric(big_df["上榜次数-总计"])
big_df["上榜次数-溢价"] = pd.to_numeric(big_df["上榜次数-溢价"])
big_df["上榜次数-折价"] = pd.to_numeric(big_df["上榜次数-折价"])
big_df["总成交额"] = pd.to_numeric(big_df["总成交额"])
big_df["折溢率"] = pd.to_numeric(big_df["折溢率"])
big_df["成交总额/流通市值"] = pd.to_numeric(big_df["成交总额/流通市值"])
big_df["上榜日后平均涨跌幅-1日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-1日"])
big_df["上榜日后平均涨跌幅-5日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-5日"])
big_df["上榜日后平均涨跌幅-10日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-10日"])
big_df["上榜日后平均涨跌幅-20日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-20日"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L262-L352
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 9.89011 |
[
9,
15,
16,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
59,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90
] | 34.065934 | false | 5.586592 | 91 | 2 | 65.934066 | 6 |
def stock_dzjy_hygtj(symbol: str = '近三月') -> pd.DataFrame:
period_map = {
'近一月': '1',
'近三月': '3',
'近六月': '6',
'近一年': '12',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'DEAL_NUM,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_ACSTA',
'columns': 'SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,CLOSE_PRICE,CHANGE_RATE,TRADE_DATE,DEAL_AMT,PREMIUM_RATIO,SUM_TURNOVERRATE,DEAL_NUM,PREMIUM_TIMES,DISCOUNT_TIMES,D1_AVG_ADJCHRATE,D5_AVG_ADJCHRATE,D10_AVG_ADJCHRATE,D20_AVG_ADJCHRATE,DATE_TYPE_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(DATE_TYPE_CODE={period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"证券代码",
"_",
"证券简称",
"最新价",
"涨跌幅",
"最近上榜日",
"总成交额",
"折溢率",
"成交总额/流通市值",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
"_",
]
big_df = big_df[[
"序号",
"证券代码",
"证券简称",
"最新价",
"涨跌幅",
"最近上榜日",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"总成交额",
"折溢率",
"成交总额/流通市值",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
]]
big_df["最近上榜日"] = pd.to_datetime(big_df["最近上榜日"]).dt.date
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["上榜次数-总计"] = pd.to_numeric(big_df["上榜次数-总计"])
big_df["上榜次数-溢价"] = pd.to_numeric(big_df["上榜次数-溢价"])
big_df["上榜次数-折价"] = pd.to_numeric(big_df["上榜次数-折价"])
big_df["总成交额"] = pd.to_numeric(big_df["总成交额"])
big_df["折溢率"] = pd.to_numeric(big_df["折溢率"])
big_df["成交总额/流通市值"] = pd.to_numeric(big_df["成交总额/流通市值"])
big_df["上榜日后平均涨跌幅-1日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-1日"])
big_df["上榜日后平均涨跌幅-5日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-5日"])
big_df["上榜日后平均涨跌幅-10日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-10日"])
big_df["上榜日后平均涨跌幅-20日"] = pd.to_numeric(big_df["上榜日后平均涨跌幅-20日"])
return big_df
| 18,793 |
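A sketch of calling the active A-share statistics function above with a non-default window; the accepted values come straight from the record's docstring, and the threshold of five appearances is an arbitrary illustration:

from akshare.stock.stock_dzjy_em import stock_dzjy_hygtj

active_df = stock_dzjy_hygtj(symbol='近一月')  # also: '近三月', '近六月', '近一年'
print(active_df[active_df['上榜次数-总计'] > 5])  # stocks listed more than five times
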
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_hyyybtj
|
(symbol: str = '近3日') -> pd.DataFrame
|
return big_df
|
东方财富网-数据中心-大宗交易-活跃营业部统计
https://data.eastmoney.com/dzjy/dzjy_hyyybtj.html
:param symbol: choice of {'当前交易日', '近3日', '近5日', '近10日', '近30日'}
:type symbol: str
:return: 活跃营业部统计
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-活跃营业部统计
https://data.eastmoney.com/dzjy/dzjy_hyyybtj.html
:param symbol: choice of {'当前交易日', '近3日', '近5日', '近10日', '近30日'}
:type symbol: str
:return: 活跃营业部统计
:rtype: pandas.DataFrame
| 355 | 425 |
def stock_dzjy_hyyybtj(symbol: str = '近3日') -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-活跃营业部统计
https://data.eastmoney.com/dzjy/dzjy_hyyybtj.html
:param symbol: choice of {'当前交易日', '近3日', '近5日', '近10日', '近30日'}
:type symbol: str
:return: 活跃营业部统计
:rtype: pandas.DataFrame
"""
period_map = {
'当前交易日': '1',
'近3日': '3',
'近5日': '5',
'近10日': '10',
'近30日': '30',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'BUYER_NUM,TOTAL_BUYAMT',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_OPERATEDEPTSTATISTICS',
'columns': 'OPERATEDEPT_CODE,OPERATEDEPT_NAME,ONLIST_DATE,STOCK_DETAILS,BUYER_NUM,SELLER_NUM,TOTAL_BUYAMT,TOTAL_SELLAMT,TOTAL_NETAMT,N_DATE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(N_DATE=-{period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"营业部名称",
"最近上榜日",
"买入的股票",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"_",
]
big_df = big_df[[
"序号",
"最近上榜日",
"营业部名称",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"买入的股票",
]]
big_df["最近上榜日"] = pd.to_datetime(big_df["最近上榜日"]).dt.date
big_df["次数总计-买入"] = pd.to_numeric(big_df["次数总计-买入"])
big_df["次数总计-卖出"] = pd.to_numeric(big_df["次数总计-卖出"])
big_df["成交金额统计-买入"] = pd.to_numeric(big_df["成交金额统计-买入"])
big_df["成交金额统计-卖出"] = pd.to_numeric(big_df["成交金额统计-卖出"])
big_df["成交金额统计-净买入额"] = pd.to_numeric(big_df["成交金额统计-净买入额"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L355-L425
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 12.676056 |
[
9,
16,
17,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
53,
64,
65,
66,
67,
68,
69,
70
] | 33.802817 | false | 5.586592 | 71 | 2 | 66.197183 | 6 |
def stock_dzjy_hyyybtj(symbol: str = '近3日') -> pd.DataFrame:
period_map = {
'当前交易日': '1',
'近3日': '3',
'近5日': '5',
'近10日': '10',
'近30日': '30',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'BUYER_NUM,TOTAL_BUYAMT',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_OPERATEDEPTSTATISTICS',
'columns': 'OPERATEDEPT_CODE,OPERATEDEPT_NAME,ONLIST_DATE,STOCK_DETAILS,BUYER_NUM,SELLER_NUM,TOTAL_BUYAMT,TOTAL_SELLAMT,TOTAL_NETAMT,N_DATE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(N_DATE=-{period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"营业部名称",
"最近上榜日",
"买入的股票",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"_",
]
big_df = big_df[[
"序号",
"最近上榜日",
"营业部名称",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"买入的股票",
]]
big_df["最近上榜日"] = pd.to_datetime(big_df["最近上榜日"]).dt.date
big_df["次数总计-买入"] = pd.to_numeric(big_df["次数总计-买入"])
big_df["次数总计-卖出"] = pd.to_numeric(big_df["次数总计-卖出"])
big_df["成交金额统计-买入"] = pd.to_numeric(big_df["成交金额统计-买入"])
big_df["成交金额统计-卖出"] = pd.to_numeric(big_df["成交金额统计-卖出"])
big_df["成交金额统计-净买入额"] = pd.to_numeric(big_df["成交金额统计-净买入额"])
return big_df
| 18,794 |
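A usage sketch for the active-brokerage-branch statistics above; '近5日' is one of the windows the docstring lists, and sorting by net buy amount is just one plausible way to read the result:

from akshare.stock.stock_dzjy_em import stock_dzjy_hyyybtj

branch_df = stock_dzjy_hyyybtj(symbol='近5日')
# branches ranked by net block-trade buying over the chosen window
print(branch_df.sort_values('成交金额统计-净买入额', ascending=False).head())
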
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_dzjy_em.py
|
stock_dzjy_yybph
|
(symbol: str = '近三月') -> pd.DataFrame
|
return big_df
|
东方财富网-数据中心-大宗交易-营业部排行
http://data.eastmoney.com/dzjy/dzjy_yybph.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 营业部排行
:rtype: pandas.DataFrame
|
东方财富网-数据中心-大宗交易-营业部排行
http://data.eastmoney.com/dzjy/dzjy_yybph.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 营业部排行
:rtype: pandas.DataFrame
| 428 | 515 |
def stock_dzjy_yybph(symbol: str = '近三月') -> pd.DataFrame:
"""
东方财富网-数据中心-大宗交易-营业部排行
http://data.eastmoney.com/dzjy/dzjy_yybph.aspx
:param symbol: choice of {'近一月', '近三月', '近六月', '近一年'}
:type symbol: str
:return: 营业部排行
:rtype: pandas.DataFrame
"""
period_map = {
'近一月': '30',
'近三月': '90',
'近六月': '120',
'近一年': '360',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'D5_BUYER_NUM,D1_AVERAGE_INCREASE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_OPERATEDEPT_RANK',
'columns': 'OPERATEDEPT_CODE,OPERATEDEPT_NAME,D1_BUYER_NUM,D1_AVERAGE_INCREASE,D1_RISE_PROBABILITY,D5_BUYER_NUM,D5_AVERAGE_INCREASE,D5_RISE_PROBABILITY,D10_BUYER_NUM,D10_AVERAGE_INCREASE,D10_RISE_PROBABILITY,D20_BUYER_NUM,D20_AVERAGE_INCREASE,D20_RISE_PROBABILITY,N_DATE,RELATED_ORG_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(N_DATE=-{period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"上榜后1天-上涨概率",
"上榜后5天-买入次数",
"上榜后5天-平均涨幅",
"上榜后5天-上涨概率",
"上榜后10天-买入次数",
"上榜后10天-平均涨幅",
"上榜后10天-上涨概率",
"上榜后20天-买入次数",
"上榜后20天-平均涨幅",
"上榜后20天-上涨概率",
"_",
"_",
]
big_df = big_df[[
"序号",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"上榜后1天-上涨概率",
"上榜后5天-买入次数",
"上榜后5天-平均涨幅",
"上榜后5天-上涨概率",
"上榜后10天-买入次数",
"上榜后10天-平均涨幅",
"上榜后10天-上涨概率",
"上榜后20天-买入次数",
"上榜后20天-平均涨幅",
"上榜后20天-上涨概率",
]]
big_df['上榜后1天-买入次数'] = pd.to_numeric(big_df['上榜后1天-买入次数'])
big_df['上榜后1天-平均涨幅'] = pd.to_numeric(big_df['上榜后1天-平均涨幅'])
big_df['上榜后1天-上涨概率'] = pd.to_numeric(big_df['上榜后1天-上涨概率'])
big_df['上榜后5天-买入次数'] = pd.to_numeric(big_df['上榜后5天-买入次数'])
big_df['上榜后5天-平均涨幅'] = pd.to_numeric(big_df['上榜后5天-平均涨幅'])
big_df['上榜后5天-上涨概率'] = pd.to_numeric(big_df['上榜后5天-上涨概率'])
big_df['上榜后10天-买入次数'] = pd.to_numeric(big_df['上榜后10天-买入次数'])
big_df['上榜后10天-平均涨幅'] = pd.to_numeric(big_df['上榜后10天-平均涨幅'])
big_df['上榜后10天-上涨概率'] = pd.to_numeric(big_df['上榜后10天-上涨概率'])
big_df['上榜后20天-买入次数'] = pd.to_numeric(big_df['上榜后20天-买入次数'])
big_df['上榜后20天-平均涨幅'] = pd.to_numeric(big_df['上榜后20天-平均涨幅'])
big_df['上榜后20天-上涨概率'] = pd.to_numeric(big_df['上榜后20天-上涨概率'])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_dzjy_em.py#L428-L515
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 10.227273 |
[
9,
15,
16,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
38,
39,
40,
59,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87
] | 34.090909 | false | 5.586592 | 88 | 2 | 65.909091 | 6 |
def stock_dzjy_yybph(symbol: str = '近三月') -> pd.DataFrame:
period_map = {
'近一月': '30',
'近三月': '90',
'近六月': '120',
'近一年': '360',
}
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'D5_BUYER_NUM,D1_AVERAGE_INCREASE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPT_BLOCKTRADE_OPERATEDEPT_RANK',
'columns': 'OPERATEDEPT_CODE,OPERATEDEPT_NAME,D1_BUYER_NUM,D1_AVERAGE_INCREASE,D1_RISE_PROBABILITY,D5_BUYER_NUM,D5_AVERAGE_INCREASE,D5_RISE_PROBABILITY,D10_BUYER_NUM,D10_AVERAGE_INCREASE,D10_RISE_PROBABILITY,D20_BUYER_NUM,D20_AVERAGE_INCREASE,D20_RISE_PROBABILITY,N_DATE,RELATED_ORG_CODE',
'source': 'WEB',
'client': 'WEB',
'filter': f'(N_DATE=-{period_map[symbol]})',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']["pages"]
big_df = pd.DataFrame()
for page in range(1, int(total_page)+1):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"上榜后1天-上涨概率",
"上榜后5天-买入次数",
"上榜后5天-平均涨幅",
"上榜后5天-上涨概率",
"上榜后10天-买入次数",
"上榜后10天-平均涨幅",
"上榜后10天-上涨概率",
"上榜后20天-买入次数",
"上榜后20天-平均涨幅",
"上榜后20天-上涨概率",
"_",
"_",
]
big_df = big_df[[
"序号",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"上榜后1天-上涨概率",
"上榜后5天-买入次数",
"上榜后5天-平均涨幅",
"上榜后5天-上涨概率",
"上榜后10天-买入次数",
"上榜后10天-平均涨幅",
"上榜后10天-上涨概率",
"上榜后20天-买入次数",
"上榜后20天-平均涨幅",
"上榜后20天-上涨概率",
]]
big_df['上榜后1天-买入次数'] = pd.to_numeric(big_df['上榜后1天-买入次数'])
big_df['上榜后1天-平均涨幅'] = pd.to_numeric(big_df['上榜后1天-平均涨幅'])
big_df['上榜后1天-上涨概率'] = pd.to_numeric(big_df['上榜后1天-上涨概率'])
big_df['上榜后5天-买入次数'] = pd.to_numeric(big_df['上榜后5天-买入次数'])
big_df['上榜后5天-平均涨幅'] = pd.to_numeric(big_df['上榜后5天-平均涨幅'])
big_df['上榜后5天-上涨概率'] = pd.to_numeric(big_df['上榜后5天-上涨概率'])
big_df['上榜后10天-买入次数'] = pd.to_numeric(big_df['上榜后10天-买入次数'])
big_df['上榜后10天-平均涨幅'] = pd.to_numeric(big_df['上榜后10天-平均涨幅'])
big_df['上榜后10天-上涨概率'] = pd.to_numeric(big_df['上榜后10天-上涨概率'])
big_df['上榜后20天-买入次数'] = pd.to_numeric(big_df['上榜后20天-买入次数'])
big_df['上榜后20天-平均涨幅'] = pd.to_numeric(big_df['上榜后20天-平均涨幅'])
big_df['上榜后20天-上涨概率'] = pd.to_numeric(big_df['上榜后20天-上涨概率'])
return big_df
| 18,795 |
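One way the branch-ranking function above might be used, assuming the endpoint is reachable; the column passed to nlargest exists in the frame built by this record's code:

from akshare.stock.stock_dzjy_em import stock_dzjy_yybph

rank_df = stock_dzjy_yybph(symbol='近一年')
# branches whose picks most often rose five days after a block trade
print(rank_df.nlargest(10, '上榜后5天-上涨概率'))
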
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_cg_guarantee.py
|
stock_cg_guarantee_cninfo
|
(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
)
|
return temp_df
|
巨潮资讯-数据中心-专题统计-公司治理-对外担保
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
:return: 对外担保
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-公司治理-对外担保
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
:return: 对外担保
:rtype: pandas.DataFrame
| 45 | 119 |
def stock_cg_guarantee_cninfo(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-公司治理-对外担保
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
:return: 对外担保
:rtype: pandas.DataFrame
"""
symbol_map = {
"全部": '',
"深市主板": '012002',
"沪市": '012001',
"创业板": '012015',
"科创板": '012029',
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1054"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"market": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公告统计区间",
"担保金融占净资产比例",
"担保金额",
"担保笔数",
"证券简称",
"证券代码",
"归属于母公司所有者权益",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"公告统计区间",
"担保笔数",
"担保金额",
"归属于母公司所有者权益",
"担保金融占净资产比例",
]
]
temp_df["担保笔数"] = pd.to_numeric(temp_df["担保笔数"])
temp_df["担保金额"] = pd.to_numeric(temp_df["担保金额"])
temp_df["归属于母公司所有者权益"] = pd.to_numeric(temp_df["归属于母公司所有者权益"])
temp_df["担保金融占净资产比例"] = pd.to_numeric(temp_df["担保金融占净资产比例"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_cg_guarantee.py#L45-L119
| 25 |
[
0
] | 1.333333 |
[
15,
22,
23,
24,
25,
26,
27,
42,
47,
48,
49,
50,
59,
70,
71,
72,
73,
74
] | 24 | false | 28.571429 | 75 | 1 | 76 | 10 |
def stock_cg_guarantee_cninfo(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
) -> pd.DataFrame:
symbol_map = {
"全部": '',
"深市主板": '012002',
"沪市": '012001',
"创业板": '012015',
"科创板": '012029',
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1054"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"market": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公告统计区间",
"担保金融占净资产比例",
"担保金额",
"担保笔数",
"证券简称",
"证券代码",
"归属于母公司所有者权益",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"公告统计区间",
"担保笔数",
"担保金额",
"归属于母公司所有者权益",
"担保金融占净资产比例",
]
]
temp_df["担保笔数"] = pd.to_numeric(temp_df["担保笔数"])
temp_df["担保金额"] = pd.to_numeric(temp_df["担保金额"])
temp_df["归属于母公司所有者权益"] = pd.to_numeric(temp_df["归属于母公司所有者权益"])
temp_df["担保金融占净资产比例"] = pd.to_numeric(temp_df["担保金融占净资产比例"])
return temp_df
| 18,796 |
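A hedged sketch for the cninfo guarantee function above. Besides requests and pandas it needs py_mini_racer (for the mcode token built in the record's code); the market choice and date range are illustrative only:

from akshare.stock.stock_cg_guarantee import stock_cg_guarantee_cninfo

guarantee_df = stock_cg_guarantee_cninfo(symbol='创业板', start_date='20180630', end_date='20210927')
# companies with the largest guarantee-to-net-asset ratios in the window
print(guarantee_df.nlargest(10, '担保金融占净资产比例'))
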
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund.py
|
stock_individual_fund_flow
|
(
stock: str = "600094", market: str = "sh"
)
|
return temp_df
|
东方财富网-数据中心-资金流向-个股
http://data.eastmoney.com/zjlx/detail.html
:param stock: 股票代码
:type stock: str
:param market: 股票市场; 上海证券交易所: sh, 深证证券交易所: sz
:type market: str
:return: 近期个股的资金流数据
:rtype: pandas.DataFrame
|
东方财富网-数据中心-资金流向-个股
http://data.eastmoney.com/zjlx/detail.html
:param stock: 股票代码
:type stock: str
:param market: 股票市场; 上海证券交易所: sh, 深证证券交易所: sz
:type market: str
:return: 近期个股的资金流数据
:rtype: pandas.DataFrame
| 15 | 94 |
def stock_individual_fund_flow(
stock: str = "600094", market: str = "sh"
) -> pd.DataFrame:
"""
东方财富网-数据中心-资金流向-个股
http://data.eastmoney.com/zjlx/detail.html
:param stock: 股票代码
:type stock: str
:param market: 股票市场; 上海证券交易所: sh, 深证证券交易所: sz
:type market: str
:return: 近期个股的资金流数据
:rtype: pandas.DataFrame
"""
market_map = {"sh": 1, "sz": 0}
url = "http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"lmt": "0",
"klt": "101",
"secid": f"{market_map[market]}.{stock}",
"fields1": "f1,f2,f3,f7",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery183003743205523325188_1589197499471",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
content_list = json_data["data"]["klines"]
temp_df = pd.DataFrame([item.split(",") for item in content_list])
temp_df.columns = [
"日期",
"主力净流入-净额",
"小单净流入-净额",
"中单净流入-净额",
"大单净流入-净额",
"超大单净流入-净额",
"主力净流入-净占比",
"小单净流入-净占比",
"中单净流入-净占比",
"大单净流入-净占比",
"超大单净流入-净占比",
"收盘价",
"涨跌幅",
"-",
"-",
]
temp_df = temp_df[
[
"日期",
"收盘价",
"涨跌幅",
"主力净流入-净额",
"主力净流入-净占比",
"超大单净流入-净额",
"超大单净流入-净占比",
"大单净流入-净额",
"大单净流入-净占比",
"中单净流入-净额",
"中单净流入-净占比",
"小单净流入-净额",
"小单净流入-净占比",
]
]
temp_df["主力净流入-净额"] = pd.to_numeric(temp_df["主力净流入-净额"])
temp_df["小单净流入-净额"] = pd.to_numeric(temp_df["小单净流入-净额"])
temp_df["中单净流入-净额"] = pd.to_numeric(temp_df["中单净流入-净额"])
temp_df["大单净流入-净额"] = pd.to_numeric(temp_df["大单净流入-净额"])
temp_df["超大单净流入-净额"] = pd.to_numeric(temp_df["超大单净流入-净额"])
temp_df["主力净流入-净占比"] = pd.to_numeric(temp_df["主力净流入-净占比"])
temp_df["小单净流入-净占比"] = pd.to_numeric(temp_df["小单净流入-净占比"])
temp_df["中单净流入-净占比"] = pd.to_numeric(temp_df["中单净流入-净占比"])
temp_df["大单净流入-净占比"] = pd.to_numeric(temp_df["大单净流入-净占比"])
temp_df["超大单净流入-净占比"] = pd.to_numeric(temp_df["超大单净流入-净占比"])
temp_df["收盘价"] = pd.to_numeric(temp_df["收盘价"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund.py#L15-L94
| 25 |
[
0
] | 1.25 |
[
13,
14,
15,
18,
28,
29,
30,
31,
32,
33,
50,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79
] | 30 | false | 7.8125 | 80 | 2 | 70 | 8 |
def stock_individual_fund_flow(
stock: str = "600094", market: str = "sh"
) -> pd.DataFrame:
market_map = {"sh": 1, "sz": 0}
url = "http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"lmt": "0",
"klt": "101",
"secid": f"{market_map[market]}.{stock}",
"fields1": "f1,f2,f3,f7",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery183003743205523325188_1589197499471",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
content_list = json_data["data"]["klines"]
temp_df = pd.DataFrame([item.split(",") for item in content_list])
temp_df.columns = [
"日期",
"主力净流入-净额",
"小单净流入-净额",
"中单净流入-净额",
"大单净流入-净额",
"超大单净流入-净额",
"主力净流入-净占比",
"小单净流入-净占比",
"中单净流入-净占比",
"大单净流入-净占比",
"超大单净流入-净占比",
"收盘价",
"涨跌幅",
"-",
"-",
]
temp_df = temp_df[
[
"日期",
"收盘价",
"涨跌幅",
"主力净流入-净额",
"主力净流入-净占比",
"超大单净流入-净额",
"超大单净流入-净占比",
"大单净流入-净额",
"大单净流入-净占比",
"中单净流入-净额",
"中单净流入-净占比",
"小单净流入-净额",
"小单净流入-净占比",
]
]
temp_df["主力净流入-净额"] = pd.to_numeric(temp_df["主力净流入-净额"])
temp_df["小单净流入-净额"] = pd.to_numeric(temp_df["小单净流入-净额"])
temp_df["中单净流入-净额"] = pd.to_numeric(temp_df["中单净流入-净额"])
temp_df["大单净流入-净额"] = pd.to_numeric(temp_df["大单净流入-净额"])
temp_df["超大单净流入-净额"] = pd.to_numeric(temp_df["超大单净流入-净额"])
temp_df["主力净流入-净占比"] = pd.to_numeric(temp_df["主力净流入-净占比"])
temp_df["小单净流入-净占比"] = pd.to_numeric(temp_df["小单净流入-净占比"])
temp_df["中单净流入-净占比"] = pd.to_numeric(temp_df["中单净流入-净占比"])
temp_df["大单净流入-净占比"] = pd.to_numeric(temp_df["大单净流入-净占比"])
temp_df["超大单净流入-净占比"] = pd.to_numeric(temp_df["超大单净流入-净占比"])
temp_df["收盘价"] = pd.to_numeric(temp_df["收盘价"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
return temp_df
| 18,797 |
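A small usage sketch for the per-stock fund-flow function above; as the docstring says, the market prefix must match the listing venue of the stock code ('sh' or 'sz'), and the call below simply reuses the defaults:

from akshare.stock.stock_fund import stock_individual_fund_flow

flow_df = stock_individual_fund_flow(stock='600094', market='sh')
# days with the heaviest main-force net inflow
print(flow_df.nlargest(5, '主力净流入-净额')[['日期', '主力净流入-净额', '涨跌幅']])
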
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund.py
|
stock_individual_fund_flow_rank
|
(indicator: str = "5日") -> pd.DataFrame
|
return temp_df
|
东方财富网-数据中心-资金流向-排名
http://data.eastmoney.com/zjlx/detail.html
:param indicator: choice of {"今日", "3日", "5日", "10日"}
:type indicator: str
:return: 指定 indicator 资金流向排行
:rtype: pandas.DataFrame
|
东方财富网-数据中心-资金流向-排名
http://data.eastmoney.com/zjlx/detail.html
:param indicator: choice of {"今日", "3日", "5日", "10日"}
:type indicator: str
:return: 指定 indicator 资金流向排行
:rtype: pandas.DataFrame
| 97 | 306 |
def stock_individual_fund_flow_rank(indicator: str = "5日") -> pd.DataFrame:
"""
东方财富网-数据中心-资金流向-排名
http://data.eastmoney.com/zjlx/detail.html
:param indicator: choice of {"今日", "3日", "5日", "10日"}
:type indicator: str
:return: 指定 indicator 资金流向排行
:rtype: pandas.DataFrame
"""
indicator_map = {
"今日": [
"f62",
"f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124",
],
"3日": [
"f267",
"f12,f14,f2,f127,f267,f268,f269,f270,f271,f272,f273,f274,f275,f276,f257,f258,f124",
],
"5日": [
"f164",
"f12,f14,f2,f109,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f257,f258,f124",
],
"10日": [
"f174",
"f12,f14,f2,f160,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f260,f261,f124",
],
}
url = "http://push2.eastmoney.com/api/qt/clist/get"
params = {
"fid": indicator_map[indicator][0],
"po": "1",
"pz": "5000",
"pn": "1",
"np": "1",
"fltt": "2",
"invt": "2",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"fs": "m:0+t:6+f:!2,m:0+t:13+f:!2,m:0+t:80+f:!2,m:1+t:2+f:!2,m:1+t:23+f:!2,m:0+t:7+f:!2,m:1+t:3+f:!2",
"fields": indicator_map[indicator][1],
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
if indicator == "今日":
temp_df.columns = [
"序号",
"最新价",
"今日涨跌幅",
"代码",
"名称",
"今日主力净流入-净额",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"_",
"今日主力净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"今日涨跌幅",
"今日主力净流入-净额",
"今日主力净流入-净占比",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
]
]
elif indicator == "3日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"_",
"3日涨跌幅",
"_",
"_",
"_",
"3日主力净流入-净额",
"3日主力净流入-净占比",
"3日超大单净流入-净额",
"3日超大单净流入-净占比",
"3日大单净流入-净额",
"3日大单净流入-净占比",
"3日中单净流入-净额",
"3日中单净流入-净占比",
"3日小单净流入-净额",
"3日小单净流入-净占比",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"3日涨跌幅",
"3日主力净流入-净额",
"3日主力净流入-净占比",
"3日超大单净流入-净额",
"3日超大单净流入-净占比",
"3日大单净流入-净额",
"3日大单净流入-净占比",
"3日中单净流入-净额",
"3日中单净流入-净占比",
"3日小单净流入-净额",
"3日小单净流入-净占比",
]
]
elif indicator == "5日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"5日涨跌幅",
"_",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"5日涨跌幅",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
]
]
elif indicator == "10日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"_",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund.py#L97-L306
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 4.285714 |
[
9,
27,
28,
40,
41,
42,
43,
44,
45,
46,
67,
86,
87,
108,
127,
128,
149,
168,
169,
190,
209
] | 10 | false | 7.8125 | 210 | 5 | 90 | 6 |
def stock_individual_fund_flow_rank(indicator: str = "5日") -> pd.DataFrame:
indicator_map = {
"今日": [
"f62",
"f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124",
],
"3日": [
"f267",
"f12,f14,f2,f127,f267,f268,f269,f270,f271,f272,f273,f274,f275,f276,f257,f258,f124",
],
"5日": [
"f164",
"f12,f14,f2,f109,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f257,f258,f124",
],
"10日": [
"f174",
"f12,f14,f2,f160,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f260,f261,f124",
],
}
url = "http://push2.eastmoney.com/api/qt/clist/get"
params = {
"fid": indicator_map[indicator][0],
"po": "1",
"pz": "5000",
"pn": "1",
"np": "1",
"fltt": "2",
"invt": "2",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"fs": "m:0+t:6+f:!2,m:0+t:13+f:!2,m:0+t:80+f:!2,m:1+t:2+f:!2,m:1+t:23+f:!2,m:0+t:7+f:!2,m:1+t:3+f:!2",
"fields": indicator_map[indicator][1],
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
if indicator == "今日":
temp_df.columns = [
"序号",
"最新价",
"今日涨跌幅",
"代码",
"名称",
"今日主力净流入-净额",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"_",
"今日主力净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"今日涨跌幅",
"今日主力净流入-净额",
"今日主力净流入-净占比",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
]
]
elif indicator == "3日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"_",
"3日涨跌幅",
"_",
"_",
"_",
"3日主力净流入-净额",
"3日主力净流入-净占比",
"3日超大单净流入-净额",
"3日超大单净流入-净占比",
"3日大单净流入-净额",
"3日大单净流入-净占比",
"3日中单净流入-净额",
"3日中单净流入-净占比",
"3日小单净流入-净额",
"3日小单净流入-净占比",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"3日涨跌幅",
"3日主力净流入-净额",
"3日主力净流入-净占比",
"3日超大单净流入-净额",
"3日超大单净流入-净占比",
"3日大单净流入-净额",
"3日大单净流入-净占比",
"3日中单净流入-净额",
"3日中单净流入-净占比",
"3日小单净流入-净额",
"3日小单净流入-净占比",
]
]
elif indicator == "5日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"5日涨跌幅",
"_",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"5日涨跌幅",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
]
]
elif indicator == "10日":
temp_df.columns = [
"序号",
"最新价",
"代码",
"名称",
"_",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
]
]
return temp_df
| 18,798 |
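The ranking function above prefixes every flow column with the chosen window, so the column names depend on the indicator argument; a sketch with the '今日' variant (values for suspended stocks may come back as '-', since this branch does not coerce to numeric):

from akshare.stock.stock_fund import stock_individual_fund_flow_rank

rank_df = stock_individual_fund_flow_rank(indicator='今日')
print(rank_df.head(10)[['代码', '名称', '今日主力净流入-净额', '今日主力净流入-净占比']])
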
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund.py
|
stock_market_fund_flow
|
()
|
return temp_df
|
东方财富网-数据中心-资金流向-大盘
http://data.eastmoney.com/zjlx/dpzjlx.html
:return: 近期大盘的资金流数据
:rtype: pandas.DataFrame
|
东方财富网-数据中心-资金流向-大盘
http://data.eastmoney.com/zjlx/dpzjlx.html
:return: 近期大盘的资金流数据
:rtype: pandas.DataFrame
| 309 | 389 |
def stock_market_fund_flow() -> pd.DataFrame:
"""
东方财富网-数据中心-资金流向-大盘
http://data.eastmoney.com/zjlx/dpzjlx.html
:return: 近期大盘的资金流数据
:rtype: pandas.DataFrame
"""
url = "http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"lmt": "0",
"klt": "101",
"secid": "1.000001",
"secid2": "0.399001",
"fields1": "f1,f2,f3,f7",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery183003743205523325188_1589197499471",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
content_list = json_data["data"]["klines"]
temp_df = pd.DataFrame([item.split(",") for item in content_list])
temp_df.columns = [
"日期",
"主力净流入-净额",
"小单净流入-净额",
"中单净流入-净额",
"大单净流入-净额",
"超大单净流入-净额",
"主力净流入-净占比",
"小单净流入-净占比",
"中单净流入-净占比",
"大单净流入-净占比",
"超大单净流入-净占比",
"上证-收盘价",
"上证-涨跌幅",
"深证-收盘价",
"深证-涨跌幅",
]
temp_df = temp_df[
[
"日期",
"上证-收盘价",
"上证-涨跌幅",
"深证-收盘价",
"深证-涨跌幅",
"主力净流入-净额",
"主力净流入-净占比",
"超大单净流入-净额",
"超大单净流入-净占比",
"大单净流入-净额",
"大单净流入-净占比",
"中单净流入-净额",
"中单净流入-净占比",
"小单净流入-净额",
"小单净流入-净占比",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["上证-收盘价"] = pd.to_numeric(temp_df["上证-收盘价"], errors="coerce")
temp_df["上证-涨跌幅"] = pd.to_numeric(temp_df["上证-涨跌幅"], errors="coerce")
temp_df["深证-收盘价"] = pd.to_numeric(temp_df["深证-收盘价"], errors="coerce")
temp_df["深证-涨跌幅"] = pd.to_numeric(temp_df["深证-涨跌幅"], errors="coerce")
temp_df["主力净流入-净额"] = pd.to_numeric(temp_df["主力净流入-净额"], errors="coerce")
temp_df["主力净流入-净占比"] = pd.to_numeric(temp_df["主力净流入-净占比"], errors="coerce")
temp_df["超大单净流入-净额"] = pd.to_numeric(temp_df["超大单净流入-净额"], errors="coerce")
temp_df["超大单净流入-净占比"] = pd.to_numeric(
temp_df["超大单净流入-净占比"], errors="coerce"
)
temp_df["大单净流入-净额"] = pd.to_numeric(temp_df["大单净流入-净额"], errors="coerce")
temp_df["大单净流入-净占比"] = pd.to_numeric(temp_df["大单净流入-净占比"], errors="coerce")
temp_df["中单净流入-净额"] = pd.to_numeric(temp_df["中单净流入-净额"], errors="coerce")
temp_df["中单净流入-净占比"] = pd.to_numeric(temp_df["中单净流入-净占比"], errors="coerce")
temp_df["小单净流入-净额"] = pd.to_numeric(temp_df["小单净流入-净额"], errors="coerce")
temp_df["小单净流入-净占比"] = pd.to_numeric(temp_df["小单净流入-净占比"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund.py#L309-L389
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.641975 |
[
7,
8,
11,
22,
23,
24,
25,
26,
27,
44,
63,
64,
65,
66,
67,
68,
69,
70,
71,
74,
75,
76,
77,
78,
79,
80
] | 32.098765 | false | 7.8125 | 81 | 2 | 67.901235 | 4 |
def stock_market_fund_flow() -> pd.DataFrame:
url = "http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"lmt": "0",
"klt": "101",
"secid": "1.000001",
"secid2": "0.399001",
"fields1": "f1,f2,f3,f7",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery183003743205523325188_1589197499471",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
content_list = json_data["data"]["klines"]
temp_df = pd.DataFrame([item.split(",") for item in content_list])
temp_df.columns = [
"日期",
"主力净流入-净额",
"小单净流入-净额",
"中单净流入-净额",
"大单净流入-净额",
"超大单净流入-净额",
"主力净流入-净占比",
"小单净流入-净占比",
"中单净流入-净占比",
"大单净流入-净占比",
"超大单净流入-净占比",
"上证-收盘价",
"上证-涨跌幅",
"深证-收盘价",
"深证-涨跌幅",
]
temp_df = temp_df[
[
"日期",
"上证-收盘价",
"上证-涨跌幅",
"深证-收盘价",
"深证-涨跌幅",
"主力净流入-净额",
"主力净流入-净占比",
"超大单净流入-净额",
"超大单净流入-净占比",
"大单净流入-净额",
"大单净流入-净占比",
"中单净流入-净额",
"中单净流入-净占比",
"小单净流入-净额",
"小单净流入-净占比",
]
]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["上证-收盘价"] = pd.to_numeric(temp_df["上证-收盘价"], errors="coerce")
temp_df["上证-涨跌幅"] = pd.to_numeric(temp_df["上证-涨跌幅"], errors="coerce")
temp_df["深证-收盘价"] = pd.to_numeric(temp_df["深证-收盘价"], errors="coerce")
temp_df["深证-涨跌幅"] = pd.to_numeric(temp_df["深证-涨跌幅"], errors="coerce")
temp_df["主力净流入-净额"] = pd.to_numeric(temp_df["主力净流入-净额"], errors="coerce")
temp_df["主力净流入-净占比"] = pd.to_numeric(temp_df["主力净流入-净占比"], errors="coerce")
temp_df["超大单净流入-净额"] = pd.to_numeric(temp_df["超大单净流入-净额"], errors="coerce")
temp_df["超大单净流入-净占比"] = pd.to_numeric(
temp_df["超大单净流入-净占比"], errors="coerce"
)
temp_df["大单净流入-净额"] = pd.to_numeric(temp_df["大单净流入-净额"], errors="coerce")
temp_df["大单净流入-净占比"] = pd.to_numeric(temp_df["大单净流入-净占比"], errors="coerce")
temp_df["中单净流入-净额"] = pd.to_numeric(temp_df["中单净流入-净额"], errors="coerce")
temp_df["中单净流入-净占比"] = pd.to_numeric(temp_df["中单净流入-净占比"], errors="coerce")
temp_df["小单净流入-净额"] = pd.to_numeric(temp_df["小单净流入-净额"], errors="coerce")
temp_df["小单净流入-净占比"] = pd.to_numeric(temp_df["小单净流入-净占比"], errors="coerce")
return temp_df
| 18,799 |
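The market-wide variant above takes no arguments; a minimal call, assuming the push2his endpoint is still live:

from akshare.stock.stock_fund import stock_market_fund_flow

market_df = stock_market_fund_flow()
# cumulative main-force net inflow over the whole history the endpoint returns
print(market_df['主力净流入-净额'].sum())
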
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund.py
|
stock_sector_fund_flow_rank
|
(
indicator: str = "10日", sector_type: str = "行业资金流"
)
|
return temp_df
|
东方财富网-数据中心-资金流向-板块资金流-排名
http://data.eastmoney.com/bkzj/hy.html
:param indicator: choice of {"今日", "5日", "10日"}
:type indicator: str
:param sector_type: choice of {"行业资金流", "概念资金流", "地域资金流"}
:type sector_type: str
:return: 指定参数的资金流排名数据
:rtype: pandas.DataFrame
|
东方财富网-数据中心-资金流向-板块资金流-排名
http://data.eastmoney.com/bkzj/hy.html
:param indicator: choice of {"今日", "5日", "10日"}
:type indicator: str
:param sector_type: choice of {"行业资金流", "概念资金流", "地域资金流"}
:type sector_type: str
:return: 指定参数的资金流排名数据
:rtype: pandas.DataFrame
| 392 | 576 |
def stock_sector_fund_flow_rank(
indicator: str = "10日", sector_type: str = "行业资金流"
) -> pd.DataFrame:
"""
东方财富网-数据中心-资金流向-板块资金流-排名
http://data.eastmoney.com/bkzj/hy.html
:param indicator: choice of {"今日", "5日", "10日"}
:type indicator: str
:param sector_type: choice of {"行业资金流", "概念资金流", "地域资金流"}
:type sector_type: str
:return: 指定参数的资金流排名数据
:rtype: pandas.DataFrame
"""
sector_type_map = {"行业资金流": "2", "概念资金流": "3", "地域资金流": "1"}
indicator_map = {
"今日": [
"f62",
"1",
"f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124",
],
"5日": [
"f164",
"5",
"f12,f14,f2,f109,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f257,f258,f124",
],
"10日": [
"f174",
"10",
"f12,f14,f2,f160,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f260,f261,f124",
],
}
url = "http://push2.eastmoney.com/api/qt/clist/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"fltt": "2",
"invt": "2",
"fid0": indicator_map[indicator][0],
"fs": f"m:90 t:{sector_type_map[sector_type]}",
"stat": indicator_map[indicator][1],
"fields": indicator_map[indicator][2],
"rt": "52975239",
"cb": "jQuery18308357908311220152_1589256588824",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
temp_df = pd.DataFrame(json_data["data"]["diff"])
if indicator == "今日":
temp_df.columns = [
"-",
"今日涨跌幅",
"_",
"名称",
"今日主力净流入-净额",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"-",
"今日主力净流入-净占比",
"今日主力净流入最大股",
"今日主力净流入最大股代码",
"是否净流入",
]
temp_df = temp_df[
[
"名称",
"今日涨跌幅",
"今日主力净流入-净额",
"今日主力净流入-净占比",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"今日主力净流入最大股",
]
]
temp_df.sort_values(["今日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
elif indicator == "5日":
temp_df.columns = [
"-",
"_",
"名称",
"5日涨跌幅",
"_",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"5日主力净流入最大股",
"_",
"_",
]
temp_df = temp_df[
[
"名称",
"5日涨跌幅",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"5日主力净流入最大股",
]
]
temp_df.sort_values(["5日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
elif indicator == "10日":
temp_df.columns = [
"-",
"_",
"名称",
"_",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"10日主力净流入最大股",
"_",
"_",
]
temp_df = temp_df[
[
"名称",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"10日主力净流入最大股",
]
]
temp_df.sort_values(["10日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund.py#L392-L576
| 25 |
[
0
] | 0.540541 |
[
13,
14,
31,
32,
35,
51,
52,
53,
54,
55,
56,
77,
94,
95,
96,
97,
98,
99,
120,
137,
138,
139,
140,
141,
142,
163,
180,
181,
182,
183,
184
] | 16.756757 | false | 7.8125 | 185 | 4 | 83.243243 | 8 |
def stock_sector_fund_flow_rank(
indicator: str = "10日", sector_type: str = "行业资金流"
) -> pd.DataFrame:
sector_type_map = {"行业资金流": "2", "概念资金流": "3", "地域资金流": "1"}
indicator_map = {
"今日": [
"f62",
"1",
"f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124",
],
"5日": [
"f164",
"5",
"f12,f14,f2,f109,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f257,f258,f124",
],
"10日": [
"f174",
"10",
"f12,f14,f2,f160,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f260,f261,f124",
],
}
url = "http://push2.eastmoney.com/api/qt/clist/get"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"fltt": "2",
"invt": "2",
"fid0": indicator_map[indicator][0],
"fs": f"m:90 t:{sector_type_map[sector_type]}",
"stat": indicator_map[indicator][1],
"fields": indicator_map[indicator][2],
"rt": "52975239",
"cb": "jQuery18308357908311220152_1589256588824",
"_": int(time.time() * 1000),
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{") : -2])
temp_df = pd.DataFrame(json_data["data"]["diff"])
if indicator == "今日":
temp_df.columns = [
"-",
"今日涨跌幅",
"_",
"名称",
"今日主力净流入-净额",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"-",
"今日主力净流入-净占比",
"今日主力净流入最大股",
"今日主力净流入最大股代码",
"是否净流入",
]
temp_df = temp_df[
[
"名称",
"今日涨跌幅",
"今日主力净流入-净额",
"今日主力净流入-净占比",
"今日超大单净流入-净额",
"今日超大单净流入-净占比",
"今日大单净流入-净额",
"今日大单净流入-净占比",
"今日中单净流入-净额",
"今日中单净流入-净占比",
"今日小单净流入-净额",
"今日小单净流入-净占比",
"今日主力净流入最大股",
]
]
temp_df.sort_values(["今日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
elif indicator == "5日":
temp_df.columns = [
"-",
"_",
"名称",
"5日涨跌幅",
"_",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"5日主力净流入最大股",
"_",
"_",
]
temp_df = temp_df[
[
"名称",
"5日涨跌幅",
"5日主力净流入-净额",
"5日主力净流入-净占比",
"5日超大单净流入-净额",
"5日超大单净流入-净占比",
"5日大单净流入-净额",
"5日大单净流入-净占比",
"5日中单净流入-净额",
"5日中单净流入-净占比",
"5日小单净流入-净额",
"5日小单净流入-净占比",
"5日主力净流入最大股",
]
]
temp_df.sort_values(["5日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
elif indicator == "10日":
temp_df.columns = [
"-",
"_",
"名称",
"_",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"10日主力净流入最大股",
"_",
"_",
]
temp_df = temp_df[
[
"名称",
"10日涨跌幅",
"10日主力净流入-净额",
"10日主力净流入-净占比",
"10日超大单净流入-净额",
"10日超大单净流入-净占比",
"10日大单净流入-净额",
"10日大单净流入-净占比",
"10日中单净流入-净额",
"10日中单净流入-净占比",
"10日小单净流入-净额",
"10日小单净流入-净占比",
"10日主力净流入最大股",
]
]
temp_df.sort_values(["10日主力净流入-净额"], ascending=False, inplace=True)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename({"index": "序号"}, axis=1, inplace=True)
return temp_df
| 18,800 |
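A sketch that exercises both parameters of the sector-ranking function above; the value pair is taken from the choices in the record's docstring:

from akshare.stock.stock_fund import stock_sector_fund_flow_rank

sector_df = stock_sector_fund_flow_rank(indicator='5日', sector_type='概念资金流')
print(sector_df.head(10))  # already sorted by 5日主力净流入-净额 inside the function
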
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_stop.py
|
stock_staq_net_stop
|
()
|
return temp_df
|
东方财富网-行情中心-沪深个股-两网及退市
https://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
|
东方财富网-行情中心-沪深个股-两网及退市
https://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
| 12 | 39 |
def stock_staq_net_stop() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-两网及退市
https://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
"""
url = "http://5.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 s:3",
"fields": "f12,f14",
"_": "1622622663841",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = ["序号", "代码", "名称"]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_stop.py#L12-L39
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 25 |
[
7,
8,
21,
22,
23,
24,
25,
26,
27
] | 32.142857 | false | 31.25 | 28 | 1 | 67.857143 | 4 |
def stock_staq_net_stop() -> pd.DataFrame:
url = "http://5.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 s:3",
"fields": "f12,f14",
"_": "1622622663841",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = ["序号", "代码", "名称"]
return temp_df
| 18,801 |
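The two-network/delisted board lookup above is parameter-free; calling it is a one-liner:

from akshare.stock.stock_stop import stock_staq_net_stop

delisted_df = stock_staq_net_stop()
print(len(delisted_df), 'securities on the 两网及退市 board')
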
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_kcb_report.py
|
_stock_zh_kcb_report_em_page
|
()
|
return page_num
|
科创板报告的页数
http://data.eastmoney.com/notices/kcb.html
:return: 科创板报告的页数
:rtype: int
|
科创板报告的页数
http://data.eastmoney.com/notices/kcb.html
:return: 科创板报告的页数
:rtype: int
| 13 | 35 |
def _stock_zh_kcb_report_em_page() -> int:
"""
科创板报告的页数
http://data.eastmoney.com/notices/kcb.html
:return: 科创板报告的页数
:rtype: int
"""
url = "http://np-anotice-stock.eastmoney.com/api/security/ann"
params = {
"sr": "-1",
"page_size": "100",
"page_index": "1",
"ann_type": "KCB",
"client_source": "web",
"f_node": "0",
"s_node": "0",
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = int(
int(data_json["data"]["total_hits"]) / int(data_json["data"]["page_size"])
)
return page_num
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_kcb_report.py#L13-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 30.434783 |
[
7,
8,
17,
18,
19,
22
] | 26.086957 | false | 24.137931 | 23 | 1 | 73.913043 | 4 |
def _stock_zh_kcb_report_em_page() -> int:
url = "http://np-anotice-stock.eastmoney.com/api/security/ann"
params = {
"sr": "-1",
"page_size": "100",
"page_index": "1",
"ann_type": "KCB",
"client_source": "web",
"f_node": "0",
"s_node": "0",
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = int(
int(data_json["data"]["total_hits"]) / int(data_json["data"]["page_size"])
)
return page_num
| 18,802 |
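The page count in the helper above is a plain floor division of total_hits by page_size; a toy illustration with a made-up hit count:

total_hits, page_size = 1234, 100
page_num = int(total_hits / page_size)  # 12: the fractional last page is floored away
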
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_kcb_report.py
|
stock_zh_kcb_report_em
|
(from_page: int = 1, to_page: int = 100)
|
return big_df
|
科创板报告内容
http://data.eastmoney.com/notices/kcb.html
:param from_page: 开始获取的页码
:type from_page: int
:param to_page: 结束获取的页码
:type to_page: int
:return: 科创板报告内容
:rtype: pandas.DataFrame
|
科创板报告内容
http://data.eastmoney.com/notices/kcb.html
:param from_page: 开始获取的页码
:type from_page: int
:param to_page: 结束获取的页码
:type to_page: int
:return: 科创板报告内容
:rtype: pandas.DataFrame
| 38 | 89 |
def stock_zh_kcb_report_em(from_page: int = 1, to_page: int = 100) -> pd.DataFrame:
"""
科创板报告内容
http://data.eastmoney.com/notices/kcb.html
:param from_page: 开始获取的页码
:type from_page: int
:param to_page: 结束获取的页码
:type to_page: int
:return: 科创板报告内容
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
url = "http://np-anotice-stock.eastmoney.com/api/security/ann"
total_page = _stock_zh_kcb_report_em_page()
if to_page >= total_page:
to_page = total_page
for i in tqdm(range(from_page, to_page + 1), leave=False):
params = {
"sr": "-1",
"page_size": "100",
"page_index": i,
"ann_type": "KCB",
"client_source": "web",
"f_node": "0",
"s_node": "0",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[
[item["codes"][0]["stock_code"] for item in data_json["data"]["list"]],
[item["codes"][0]["short_name"] for item in data_json["data"]["list"]],
[item["title"] for item in data_json["data"]["list"]],
[
item["columns"][0]["column_name"]
for item in data_json["data"]["list"]
],
[item["notice_date"] for item in data_json["data"]["list"]],
[item["art_code"] for item in data_json["data"]["list"]],
]
).T
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"代码",
"名称",
"公告标题",
"公告类型",
"公告日期",
"公告代码",
]
big_df['公告日期'] = pd.to_datetime(big_df['公告日期']).dt.date
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_kcb_report.py#L38-L89
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 21.153846 |
[
11,
12,
13,
14,
15,
16,
17,
26,
27,
28,
41,
42,
50,
51
] | 26.923077 | false | 24.137931 | 52 | 9 | 73.076923 | 8 |
def stock_zh_kcb_report_em(from_page: int = 1, to_page: int = 100) -> pd.DataFrame:
big_df = pd.DataFrame()
url = "http://np-anotice-stock.eastmoney.com/api/security/ann"
total_page = _stock_zh_kcb_report_em_page()
if to_page >= total_page:
to_page = total_page
for i in tqdm(range(from_page, to_page + 1), leave=False):
params = {
"sr": "-1",
"page_size": "100",
"page_index": i,
"ann_type": "KCB",
"client_source": "web",
"f_node": "0",
"s_node": "0",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[
[item["codes"][0]["stock_code"] for item in data_json["data"]["list"]],
[item["codes"][0]["short_name"] for item in data_json["data"]["list"]],
[item["title"] for item in data_json["data"]["list"]],
[
item["columns"][0]["column_name"]
for item in data_json["data"]["list"]
],
[item["notice_date"] for item in data_json["data"]["list"]],
[item["art_code"] for item in data_json["data"]["list"]],
]
).T
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"代码",
"名称",
"公告标题",
"公告类型",
"公告日期",
"公告代码",
]
big_df['公告日期'] = pd.to_datetime(big_df['公告日期']).dt.date
return big_df
| 18,803 |
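Usage note for the KCB report pair above: stock_zh_kcb_report_em accumulates pages with DataFrame.append, which was deprecated in pandas 1.4 and removed in pandas 2.0, so the function as recorded needs an older pandas. A minimal usage sketch under that assumption (the import path follows the record's path field; variable names are illustrative):

# Assumes akshare is installed, the eastmoney endpoint is still reachable,
# and pandas < 2.0 because the loop relies on the removed DataFrame.append.
from akshare.stock.stock_zh_kcb_report import stock_zh_kcb_report_em

kcb_df = stock_zh_kcb_report_em(from_page=1, to_page=2)  # fetch only the first two pages
print(kcb_df.head())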
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_summary.py
|
stock_szse_summary
|
(date: str = "20200619")
|
return temp_df
|
深证证券交易所-总貌-证券类别统计
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 证券类别统计
:rtype: pandas.DataFrame
|
深证证券交易所-总貌-证券类别统计
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 证券类别统计
:rtype: pandas.DataFrame
| 18 | 46 |
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
深证证券交易所-总貌-证券类别统计
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 证券类别统计
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_summary.py#L18-L46
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 31.034483 |
[
9,
10,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28
] | 48.275862 | false | 7.228916 | 29 | 2 | 51.724138 | 6 |
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df
| 18,804 |
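A hedged usage sketch for stock_szse_summary above; it assumes openpyxl is available, since the function parses the exchange's xlsx response with engine="openpyxl", and that date names a completed trading day:

# Illustrative only; the column names come from the record above.
from akshare.stock.stock_summary import stock_szse_summary

szse_df = stock_szse_summary(date="20200619")  # YYYYMMDD, a completed trading day
print(szse_df[["证券类别", "数量", "成交金额"]])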
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_summary.py
|
stock_szse_area_summary
|
(date: str = "202203")
|
return temp_df
|
深证证券交易所-总貌-地区交易排序
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 地区交易排序
:rtype: pandas.DataFrame
|
深证证券交易所-总貌-地区交易排序
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 地区交易排序
:rtype: pandas.DataFrame
| 49 | 80 |
def stock_szse_area_summary(date: str = "202203") -> pd.DataFrame:
"""
深证证券交易所-总貌-地区交易排序
https://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 地区交易排序
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab2",
"DATETIME": "-".join([date[:4], date[4:6]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df.columns = ["序号", "地区", "总交易额", "占市场", "股票交易额", "基金交易额", "债券交易额"]
temp_df["总交易额"] = temp_df["总交易额"].str.replace(",", "")
temp_df["总交易额"] = pd.to_numeric(temp_df["总交易额"])
temp_df["占市场"] = pd.to_numeric(temp_df["占市场"])
temp_df["股票交易额"] = temp_df["股票交易额"].str.replace(",", "")
temp_df["股票交易额"] = pd.to_numeric(temp_df["股票交易额"], errors="coerce")
temp_df["基金交易额"] = temp_df["基金交易额"].str.replace(",", "")
temp_df["基金交易额"] = pd.to_numeric(temp_df["基金交易额"], errors="coerce")
temp_df["债券交易额"] = temp_df["债券交易额"].str.replace(",", "")
temp_df["债券交易额"] = pd.to_numeric(temp_df["债券交易额"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_summary.py#L49-L80
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 28.125 |
[
9,
10,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31
] | 53.125 | false | 7.228916 | 32 | 2 | 46.875 | 6 |
def stock_szse_area_summary(date: str = "202203") -> pd.DataFrame:
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab2",
"DATETIME": "-".join([date[:4], date[4:6]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df.columns = ["序号", "地区", "总交易额", "占市场", "股票交易额", "基金交易额", "债券交易额"]
temp_df["总交易额"] = temp_df["总交易额"].str.replace(",", "")
temp_df["总交易额"] = pd.to_numeric(temp_df["总交易额"])
temp_df["占市场"] = pd.to_numeric(temp_df["占市场"])
temp_df["股票交易额"] = temp_df["股票交易额"].str.replace(",", "")
temp_df["股票交易额"] = pd.to_numeric(temp_df["股票交易额"], errors="coerce")
temp_df["基金交易额"] = temp_df["基金交易额"].str.replace(",", "")
temp_df["基金交易额"] = pd.to_numeric(temp_df["基金交易额"], errors="coerce")
temp_df["债券交易额"] = temp_df["债券交易额"].str.replace(",", "")
temp_df["债券交易额"] = pd.to_numeric(temp_df["债券交易额"], errors="coerce")
return temp_df
| 18,805 |
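Sketch for stock_szse_area_summary; unlike the daily overview above, this table is keyed by year and month, so date is six digits:

from akshare.stock.stock_summary import stock_szse_area_summary

area_df = stock_szse_area_summary(date="202203")  # "YYYYMM", not a full day
print(area_df.head())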
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_summary.py
|
stock_szse_sector_summary
|
(symbol: str = "当月", date: str = "202203") ->
|
return temp_df
|
深圳证券交易所-统计资料-股票行业成交
https://docs.static.szse.cn/www/market/periodical/month/W020220511355248518608.html
:param symbol: choice of {"当月", "当年"}
:type symbol: str
:param date: 交易年月
:type date: str
:return: 股票行业成交
:rtype: pandas.DataFrame
|
深圳证券交易所-统计资料-股票行业成交
https://docs.static.szse.cn/www/market/periodical/month/W020220511355248518608.html
:param symbol: choice of {"当月", "当年"}
:type symbol: str
:param date: 交易年月
:type date: str
:return: 股票行业成交
:rtype: pandas.DataFrame
| 83 | 157 |
def stock_szse_sector_summary(symbol: str = "当月", date: str = "202203") -> pd.DataFrame:
"""
深圳证券交易所-统计资料-股票行业成交
https://docs.static.szse.cn/www/market/periodical/month/W020220511355248518608.html
:param symbol: choice of {"当月", "当年"}
:type symbol: str
:param date: 交易年月
:type date: str
:return: 股票行业成交
:rtype: pandas.DataFrame
"""
url = "https://www.szse.cn/market/periodical/month/index.html"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
tags_list = soup.find_all("div", attrs={"class": "g-container"})[4].find_all(
"script"
)
tags_dict = [
eval(
item.string[item.string.find("{") : item.string.find("}") + 1]
.replace("\n", "")
.replace(" ", "")
.replace("value", "'value'")
.replace("text", "'text'")
)
for item in tags_list
]
date_url_dict = dict(
zip(
[item["text"] for item in tags_dict],
[item["value"][2:] for item in tags_dict],
)
)
date_format = "-".join([date[:4], date[4:]])
url = f"http://www.szse.cn/market/periodical/month/{date_url_dict[date_format]}"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
url = soup.find("a", text="股票行业成交数据")["href"]
if symbol == "当月":
temp_df = pd.read_html(url, encoding="gbk")[0]
temp_df.columns = [
"项目名称",
"项目名称-英文",
"交易天数",
"成交金额-人民币元",
"成交金额-占总计",
"成交股数-股数",
"成交股数-占总计",
"成交笔数-笔",
"成交笔数-占总计",
]
else:
temp_df = pd.read_html(url, encoding="gbk")[1]
temp_df.columns = [
"项目名称",
"项目名称-英文",
"交易天数",
"成交金额-人民币元",
"成交金额-占总计",
"成交股数-股数",
"成交股数-占总计",
"成交笔数-笔",
"成交笔数-占总计",
]
temp_df["交易天数"] = pd.to_numeric(temp_df["交易天数"])
temp_df["成交金额-人民币元"] = pd.to_numeric(temp_df["成交金额-人民币元"])
temp_df["成交金额-占总计"] = pd.to_numeric(temp_df["成交金额-占总计"])
temp_df["成交股数-股数"] = pd.to_numeric(temp_df["成交股数-股数"])
temp_df["成交股数-占总计"] = pd.to_numeric(temp_df["成交股数-占总计"])
temp_df["成交笔数-笔"] = pd.to_numeric(temp_df["成交笔数-笔"])
temp_df["成交笔数-占总计"] = pd.to_numeric(temp_df["成交笔数-占总计"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_summary.py#L83-L157
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 14.666667 |
[
11,
12,
13,
14,
15,
18,
28,
34,
35,
36,
37,
38,
39,
40,
41,
42,
54,
55,
67,
68,
69,
70,
71,
72,
73,
74
] | 34.666667 | false | 7.228916 | 75 | 5 | 65.333333 | 8 |
def stock_szse_sector_summary(symbol: str = "当月", date: str = "202203") -> pd.DataFrame:
url = "https://www.szse.cn/market/periodical/month/index.html"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
tags_list = soup.find_all("div", attrs={"class": "g-container"})[4].find_all(
"script"
)
tags_dict = [
eval(
item.string[item.string.find("{") : item.string.find("}") + 1]
.replace("\n", "")
.replace(" ", "")
.replace("value", "'value'")
.replace("text", "'text'")
)
for item in tags_list
]
date_url_dict = dict(
zip(
[item["text"] for item in tags_dict],
[item["value"][2:] for item in tags_dict],
)
)
date_format = "-".join([date[:4], date[4:]])
url = f"http://www.szse.cn/market/periodical/month/{date_url_dict[date_format]}"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
url = soup.find("a", text="股票行业成交数据")["href"]
if symbol == "当月":
temp_df = pd.read_html(url, encoding="gbk")[0]
temp_df.columns = [
"项目名称",
"项目名称-英文",
"交易天数",
"成交金额-人民币元",
"成交金额-占总计",
"成交股数-股数",
"成交股数-占总计",
"成交笔数-笔",
"成交笔数-占总计",
]
else:
temp_df = pd.read_html(url, encoding="gbk")[1]
temp_df.columns = [
"项目名称",
"项目名称-英文",
"交易天数",
"成交金额-人民币元",
"成交金额-占总计",
"成交股数-股数",
"成交股数-占总计",
"成交笔数-笔",
"成交笔数-占总计",
]
temp_df["交易天数"] = pd.to_numeric(temp_df["交易天数"])
temp_df["成交金额-人民币元"] = pd.to_numeric(temp_df["成交金额-人民币元"])
temp_df["成交金额-占总计"] = pd.to_numeric(temp_df["成交金额-占总计"])
temp_df["成交股数-股数"] = pd.to_numeric(temp_df["成交股数-股数"])
temp_df["成交股数-占总计"] = pd.to_numeric(temp_df["成交股数-占总计"])
temp_df["成交笔数-笔"] = pd.to_numeric(temp_df["成交笔数-笔"])
temp_df["成交笔数-占总计"] = pd.to_numeric(temp_df["成交笔数-占总计"])
return temp_df
| 18,806 |
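Sketch for stock_szse_sector_summary; symbol picks the monthly ("当月") or year-to-date ("当年") table inside the SZSE monthly report page that the function scrapes, and date is again "YYYYMM":

from akshare.stock.stock_summary import stock_szse_sector_summary

sector_df = stock_szse_sector_summary(symbol="当月", date="202203")
print(sector_df[["项目名称", "成交金额-人民币元"]])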
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_summary.py
|
stock_sse_summary
|
()
|
return temp_df
|
上海证券交易所-总貌
https://www.sse.com.cn/market/stockdata/statistic/
:return: 上海证券交易所-总貌
:rtype: pandas.DataFrame
|
上海证券交易所-总貌
https://www.sse.com.cn/market/stockdata/statistic/
:return: 上海证券交易所-总貌
:rtype: pandas.DataFrame
| 160 | 201 |
def stock_sse_summary() -> pd.DataFrame:
"""
上海证券交易所-总貌
https://www.sse.com.cn/market/stockdata/statistic/
:return: 上海证券交易所-总貌
:rtype: pandas.DataFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_GPSJZM_TJSJ_L",
"PRODUCT_NAME": "股票,主板,科创板",
"type": "inParams",
"_": "1640855495128",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]).T
temp_df.reset_index(inplace=True)
temp_df["index"] = [
"流通股本",
"总市值",
"平均市盈率",
"上市公司",
"上市股票",
"流通市值",
"报告时间",
"-",
"总股本",
"项目",
]
temp_df = temp_df[temp_df["index"] != "-"].iloc[:-1, :]
temp_df.columns = [
"项目",
"股票",
"主板",
"科创板",
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_summary.py#L160-L201
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 16.666667 |
[
7,
8,
14,
18,
19,
20,
21,
22,
34,
35,
41
] | 26.190476 | false | 7.228916 | 42 | 1 | 73.809524 | 4 |
def stock_sse_summary() -> pd.DataFrame:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_GPSJZM_TJSJ_L",
"PRODUCT_NAME": "股票,主板,科创板",
"type": "inParams",
"_": "1640855495128",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]).T
temp_df.reset_index(inplace=True)
temp_df["index"] = [
"流通股本",
"总市值",
"平均市盈率",
"上市公司",
"上市股票",
"流通市值",
"报告时间",
"-",
"总股本",
"项目",
]
temp_df = temp_df[temp_df["index"] != "-"].iloc[:-1, :]
temp_df.columns = [
"项目",
"股票",
"主板",
"科创板",
]
return temp_df
| 18,807 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_summary.py
|
stock_sse_deal_daily
|
(date: str = "20220331")
|
上海证券交易所-数据-股票数据-成交概况-股票成交概况-每日股票情况
https://www.sse.com.cn/market/stockdata/overview/day/
:return: 每日股票情况
:rtype: pandas.DataFrame
|
上海证券交易所-数据-股票数据-成交概况-股票成交概况-每日股票情况
https://www.sse.com.cn/market/stockdata/overview/day/
:return: 每日股票情况
:rtype: pandas.DataFrame
| 204 | 438 |
def stock_sse_deal_daily(date: str = "20220331") -> pd.DataFrame:
"""
上海证券交易所-数据-股票数据-成交概况-股票成交概况-每日股票情况
https://www.sse.com.cn/market/stockdata/overview/day/
:return: 每日股票情况
:rtype: pandas.DataFrame
"""
if int(date) <= 20211224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"searchDate": "-".join([date[:4], date[4:6], date[6:]]),
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_DAYCJGK_C",
"stockType": "90",
"_": "1616744620492",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"股票",
"主板B",
"_",
"股票回购",
"科创板",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"流通市值",
"流通换手率",
"平均市盈率",
"_",
"市价总值",
"_",
"换手率",
"_",
"挂牌数",
"_",
"_",
"_",
"_",
"_",
"成交笔数",
"成交金额",
"成交量",
"次新股换手率",
"_",
"_",
]
temp_df = temp_df[temp_df["单日情况"] != "_"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"成交笔数",
"平均市盈率",
"换手率",
"次新股换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(drop=True, inplace=True)
temp_df["股票"] = pd.to_numeric(temp_df["股票"], errors="coerce")
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
temp_df["股票回购"] = pd.to_numeric(temp_df["股票回购"], errors="coerce")
return temp_df
elif int(date) <= 20220224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_MRGK_C",
"SEARCH_DATE": "-".join([date[:4], date[4:6], date[6:]]),
"_": "1640836561673",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"单日情况",
"主板A",
"主板B",
"科创板",
]
]
temp_df["单日情况"] = [
"市价总值",
"成交量",
"平均市盈率",
"换手率",
"成交金额",
"-",
"流通市值",
"流通换手率",
"报告日期",
"挂牌数",
"-",
]
temp_df = temp_df[temp_df["单日情况"] != "-"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"平均市盈率",
"换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
return temp_df
else:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_MRGK_C",
"PRODUCT_CODE": "01,02,03,11,17",
"type": "inParams",
"SEARCH_DATE": "-".join([date[:4], date[4:6], date[6:]]),
"_": "1640836561673",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
if len(temp_df.T) == 5:
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"股票",
]
temp_df["股票回购"] = "-"
else:
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"股票回购",
"股票",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"市价总值",
"成交量",
"平均市盈率",
"换手率",
"成交金额",
"-",
"流通市值",
"流通换手率",
"报告日期",
"挂牌数",
"-",
]
temp_df = temp_df[temp_df["单日情况"] != "-"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"平均市盈率",
"换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
temp_df["股票"] = pd.to_numeric(temp_df["股票"], errors="coerce")
temp_df["股票回购"] = pd.to_numeric(temp_df["股票回购"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_summary.py#L204-L438
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 2.978723 |
[
7,
8,
9,
15,
19,
20,
21,
22,
23,
24,
33,
43,
65,
66,
67,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
95,
99,
100,
101,
102,
103,
104,
115,
123,
136,
137,
138,
148,
149,
150,
151,
152,
153,
154,
156,
157,
164,
168,
169,
170,
171,
172,
173,
174,
181,
183,
191,
201,
214,
215,
216,
226,
227,
228,
229,
230,
231,
232,
233,
234
] | 30.638298 | false | 7.228916 | 235 | 4 | 69.361702 | 4 |
def stock_sse_deal_daily(date: str = "20220331") -> pd.DataFrame:
if int(date) <= 20211224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"searchDate": "-".join([date[:4], date[4:6], date[6:]]),
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_DAYCJGK_C",
"stockType": "90",
"_": "1616744620492",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"股票",
"主板B",
"_",
"股票回购",
"科创板",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"流通市值",
"流通换手率",
"平均市盈率",
"_",
"市价总值",
"_",
"换手率",
"_",
"挂牌数",
"_",
"_",
"_",
"_",
"_",
"成交笔数",
"成交金额",
"成交量",
"次新股换手率",
"_",
"_",
]
temp_df = temp_df[temp_df["单日情况"] != "_"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"成交笔数",
"平均市盈率",
"换手率",
"次新股换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(drop=True, inplace=True)
temp_df["股票"] = pd.to_numeric(temp_df["股票"], errors="coerce")
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
temp_df["股票回购"] = pd.to_numeric(temp_df["股票回购"], errors="coerce")
return temp_df
elif int(date) <= 20220224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_MRGK_C",
"SEARCH_DATE": "-".join([date[:4], date[4:6], date[6:]]),
"_": "1640836561673",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"单日情况",
"主板A",
"主板B",
"科创板",
]
]
temp_df["单日情况"] = [
"市价总值",
"成交量",
"平均市盈率",
"换手率",
"成交金额",
"-",
"流通市值",
"流通换手率",
"报告日期",
"挂牌数",
"-",
]
temp_df = temp_df[temp_df["单日情况"] != "-"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"平均市盈率",
"换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
return temp_df
else:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_MRGK_C",
"PRODUCT_CODE": "01,02,03,11,17",
"type": "inParams",
"SEARCH_DATE": "-".join([date[:4], date[4:6], date[6:]]),
"_": "1640836561673",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
if len(temp_df.T) == 5:
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"股票",
]
temp_df["股票回购"] = "-"
else:
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"股票回购",
"股票",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"市价总值",
"成交量",
"平均市盈率",
"换手率",
"成交金额",
"-",
"流通市值",
"流通换手率",
"报告日期",
"挂牌数",
"-",
]
temp_df = temp_df[temp_df["单日情况"] != "-"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"平均市盈率",
"换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["主板A"] = pd.to_numeric(temp_df["主板A"], errors="coerce")
temp_df["主板B"] = pd.to_numeric(temp_df["主板B"], errors="coerce")
temp_df["科创板"] = pd.to_numeric(temp_df["科创板"], errors="coerce")
temp_df["股票"] = pd.to_numeric(temp_df["股票"], errors="coerce")
temp_df["股票回购"] = pd.to_numeric(temp_df["股票回购"], errors="coerce")
return temp_df
| 18,808 |
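Sketch illustrating the date-dependent behaviour of stock_sse_deal_daily above: dates up to 2021-12-24 and up to 2022-02-24 are parsed with older column layouts, later dates with the current one, so the returned columns differ with the requested day:

from akshare.stock.stock_summary import stock_sse_deal_daily

old_df = stock_sse_deal_daily(date="20211224")  # first branch, legacy layout
new_df = stock_sse_deal_daily(date="20220331")  # third branch, current layout
print(old_df.columns.tolist())
print(new_df.columns.tolist())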
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_allotment_cninfo.py
|
stock_allotment_cninfo
|
(
symbol: str = "600030", start_date: str = "19700101", end_date: str = "22220222"
)
|
return temp_df
|
巨潮资讯-个股-配股实施方案
http://webapi.cninfo.com.cn/#/dataBrowse
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始查询的日期
:type start_date: str
:param end_date: 结束查询的日期
:type end_date: str
:return: 配股实施方案
:rtype: pandas.DataFrame
|
巨潮资讯-个股-配股实施方案
http://webapi.cninfo.com.cn/#/dataBrowse
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始查询的日期
:type symbol: str
:param end_date: 结束查询的日期
:type symbol: str
:return: 配股实施方案
:rtype: pandas.DataFrame
| 45 | 205 |
def stock_allotment_cninfo(
symbol: str = "600030", start_date: str = "19700101", end_date: str = "22220222"
) -> pd.DataFrame:
"""
巨潮资讯-个股-配股实施方案
http://webapi.cninfo.com.cn/#/dataBrowse
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始查询的日期
    :type start_date: str
:param end_date: 结束查询的日期
    :type end_date: str
:return: 配股实施方案
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2232"
params = {
"scode": symbol,
"sdate": start_date
if not start_date
else f"{start_date[0:4]}-{start_date[4:6]}-{start_date[6:8]}",
"edate": end_date
if not end_date
else f"{end_date[0:4]}-{end_date[4:6]}-{end_date[6:8]}",
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
columns = [
"记录标识",
"证券简称",
"停牌起始日",
"上市公告日期",
"配股缴款起始日",
"可转配股数量",
"停牌截止日",
"实际配股数量",
"配股价格",
"配股比例",
"配股前总股本",
"每股配权转让费(元)",
"法人股实配数量",
"实际募资净额",
"大股东认购方式",
"其他配售简称",
"发行方式",
"配股失败,退还申购款日期",
"除权基准日",
"预计发行费用",
"配股发行结果公告日",
"证券代码",
"配股权证交易截止日",
"其他股份实配数量",
"国家股实配数量",
"委托单位",
"公众获转配数量",
"其他配售代码",
"配售对象",
"配股权证交易起始日",
"资金到账日",
"机构名称",
"股权登记日",
"实际募资总额",
"预计募集资金",
"大股东认购数量",
"公众股实配数量",
"转配股实配数量",
"承销费用",
"法人获转配数量",
"配股后流通股本",
"股票类别",
"公众配售简称",
"发行方式编码",
"承销方式",
"公告日期",
"配股上市日",
"配股缴款截止日",
"承销余额(股)",
"预计配股数量",
"配股后总股本",
"职工股实配数量",
"承销方式编码",
"发行费用总额",
"配股前流通股本",
"股票类别编码",
"公众配售代码",
]
if data_json["records"]:
# 有配股记录
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = columns
dates = (
"停牌起始日",
"上市公告日期",
"配股失败,退还申购款日期",
"配股缴款起始日",
"停牌截止日",
"除权基准日",
"配股发行结果公告日",
"配股权证交易截止日",
"配股权证交易起始日",
"资金到账日",
"股权登记日",
"公告日期",
"配股上市日",
"配股缴款截止日",
)
for s in dates:
temp_df[s] = pd.to_datetime(temp_df[s], errors="coerce").dt.date
nums = (
"可转配股数量",
"实际配股数量",
"配股价格",
"配股比例",
"配股前总股本",
"每股配权转让费(元)",
"法人股实配数量",
"实际募资净额",
"预计发行费用",
"其他股份实配数量",
"国家股实配数量",
"公众获转配数量",
"实际募资总额",
"预计募集资金",
"大股东认购数量",
"公众股实配数量",
"转配股实配数量",
"承销费用",
"法人获转配数量",
"配股后流通股本",
"承销余额(股)",
"预计配股数量",
"配股后总股本",
"职工股实配数量",
"发行费用总额",
"配股前流通股本",
)
for s in nums:
temp_df[s] = pd.to_numeric(temp_df[s], errors="coerce")
else:
# 没有配股数据
temp_df = pd.DataFrame(columns=columns)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_allotment_cninfo.py#L45-L205
| 25 |
[
0
] | 0.621118 |
[
15,
16,
25,
26,
27,
28,
29,
43,
44,
45,
104,
106,
107,
108,
124,
125,
126,
154,
155,
158,
160
] | 13.043478 | false | 25.806452 | 161 | 4 | 86.956522 | 10 |
def stock_allotment_cninfo(
symbol: str = "600030", start_date: str = "19700101", end_date: str = "22220222"
) -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2232"
params = {
"scode": symbol,
"sdate": start_date
if not start_date
else f"{start_date[0:4]}-{start_date[4:6]}-{start_date[6:8]}",
"edate": end_date
if not end_date
else f"{end_date[0:4]}-{end_date[4:6]}-{end_date[6:8]}",
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
columns = [
"记录标识",
"证券简称",
"停牌起始日",
"上市公告日期",
"配股缴款起始日",
"可转配股数量",
"停牌截止日",
"实际配股数量",
"配股价格",
"配股比例",
"配股前总股本",
"每股配权转让费(元)",
"法人股实配数量",
"实际募资净额",
"大股东认购方式",
"其他配售简称",
"发行方式",
"配股失败,退还申购款日期",
"除权基准日",
"预计发行费用",
"配股发行结果公告日",
"证券代码",
"配股权证交易截止日",
"其他股份实配数量",
"国家股实配数量",
"委托单位",
"公众获转配数量",
"其他配售代码",
"配售对象",
"配股权证交易起始日",
"资金到账日",
"机构名称",
"股权登记日",
"实际募资总额",
"预计募集资金",
"大股东认购数量",
"公众股实配数量",
"转配股实配数量",
"承销费用",
"法人获转配数量",
"配股后流通股本",
"股票类别",
"公众配售简称",
"发行方式编码",
"承销方式",
"公告日期",
"配股上市日",
"配股缴款截止日",
"承销余额(股)",
"预计配股数量",
"配股后总股本",
"职工股实配数量",
"承销方式编码",
"发行费用总额",
"配股前流通股本",
"股票类别编码",
"公众配售代码",
]
if data_json["records"]:
# 有配股记录
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = columns
dates = (
"停牌起始日",
"上市公告日期",
"配股失败,退还申购款日期",
"配股缴款起始日",
"停牌截止日",
"除权基准日",
"配股发行结果公告日",
"配股权证交易截止日",
"配股权证交易起始日",
"资金到账日",
"股权登记日",
"公告日期",
"配股上市日",
"配股缴款截止日",
)
for s in dates:
temp_df[s] = pd.to_datetime(temp_df[s], errors="coerce").dt.date
nums = (
"可转配股数量",
"实际配股数量",
"配股价格",
"配股比例",
"配股前总股本",
"每股配权转让费(元)",
"法人股实配数量",
"实际募资净额",
"预计发行费用",
"其他股份实配数量",
"国家股实配数量",
"公众获转配数量",
"实际募资总额",
"预计募集资金",
"大股东认购数量",
"公众股实配数量",
"转配股实配数量",
"承销费用",
"法人获转配数量",
"配股后流通股本",
"承销余额(股)",
"预计配股数量",
"配股后总股本",
"职工股实配数量",
"发行费用总额",
"配股前流通股本",
)
for s in nums:
temp_df[s] = pd.to_numeric(temp_df[s], errors="coerce")
else:
# 没有配股数据
temp_df = pd.DataFrame(columns=columns)
return temp_df
| 18,809 |
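Sketch for stock_allotment_cninfo; the cninfo request is signed with an "mcode" header that the embedded JavaScript computes from the current timestamp via py_mini_racer, and a symbol with no rights issue comes back as an empty frame that still carries the full column set:

from akshare.stock.stock_allotment_cninfo import stock_allotment_cninfo

pg_df = stock_allotment_cninfo(symbol="600030", start_date="19700101", end_date="22220222")
if pg_df.empty:
    print("no rights-issue records for this symbol")  # empty frame, all columns present
else:
    print(pg_df[["证券简称", "配股价格", "配股比例"]])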
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_tick_tx_163.py
|
stock_zh_a_tick_tx_js
|
(symbol: str = "sz000001")
|
return big_df
|
腾讯财经-历史分笔数据
http://gu.qq.com/sz300494/gp/detail
:param symbol: 股票代码
:type symbol: str
:return: 历史分笔数据
:rtype: pandas.DataFrame
|
腾讯财经-历史分笔数据
http://gu.qq.com/sz300494/gp/detail
:param symbol: 股票代码
:type symbol: str
:return: 历史分笔数据
:rtype: pandas.DataFrame
| 17 | 68 |
def stock_zh_a_tick_tx_js(symbol: str = "sz000001") -> pd.DataFrame:
"""
腾讯财经-历史分笔数据
http://gu.qq.com/sz300494/gp/detail
:param symbol: 股票代码
:type symbol: str
    :return: 历史分笔数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page = 0
warnings.warn("正在下载数据,请稍等")
while True:
try:
url = "http://stock.gtimg.cn/data/index.php"
params = {
"appn": "detail",
"action": "data",
"c": symbol,
"p": page,
}
r = requests.get(url, params=params)
text_data = r.text
temp_df = (
pd.DataFrame(eval(text_data[text_data.find("[") :])[1].split("|"))
.iloc[:, 0]
.str.split("/", expand=True)
)
page += 1
big_df = pd.concat([big_df, temp_df], ignore_index=True)
except:
break
if not big_df.empty:
big_df = big_df.iloc[:, 1:]
big_df.columns = ["成交时间", "成交价格", "价格变动", "成交量", "成交金额", "性质"]
big_df.reset_index(drop=True, inplace=True)
property_map = {
"S": "卖盘",
"B": "买盘",
"M": "中性盘",
}
big_df["性质"] = big_df["性质"].map(property_map)
big_df = big_df.astype({
'成交时间': str,
'成交价格': float,
'价格变动': float,
'成交量': int,
'成交金额': int,
'性质': str,
})
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_tick_tx_163.py#L17-L68
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 17.307692 |
[
9,
10,
11,
12,
13,
14,
15,
21,
22,
23,
28,
29,
31,
32,
33,
34,
35,
36,
37,
42,
43,
51
] | 42.307692 | false | 12.5 | 52 | 4 | 57.692308 | 6 |
def stock_zh_a_tick_tx_js(symbol: str = "sz000001") -> pd.DataFrame:
big_df = pd.DataFrame()
page = 0
warnings.warn("正在下载数据,请稍等")
while True:
try:
url = "http://stock.gtimg.cn/data/index.php"
params = {
"appn": "detail",
"action": "data",
"c": symbol,
"p": page,
}
r = requests.get(url, params=params)
text_data = r.text
temp_df = (
pd.DataFrame(eval(text_data[text_data.find("[") :])[1].split("|"))
.iloc[:, 0]
.str.split("/", expand=True)
)
page += 1
big_df = pd.concat([big_df, temp_df], ignore_index=True)
except:
break
if not big_df.empty:
big_df = big_df.iloc[:, 1:]
big_df.columns = ["成交时间", "成交价格", "价格变动", "成交量", "成交金额", "性质"]
big_df.reset_index(drop=True, inplace=True)
property_map = {
"S": "卖盘",
"B": "买盘",
"M": "中性盘",
}
big_df["性质"] = big_df["性质"].map(property_map)
big_df = big_df.astype({
'成交时间': str,
'成交价格': float,
'价格变动': float,
'成交量': int,
'成交金额': int,
'性质': str,
})
return big_df
| 18,810 |
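Sketch for stock_zh_a_tick_tx_js; the function pages the Tencent detail endpoint until parsing fails, and the bare except also swallows network errors, so an empty result can mean either "no ticks" or "request failed":

from akshare.stock.stock_zh_a_tick_tx_163 import stock_zh_a_tick_tx_js

tick_df = stock_zh_a_tick_tx_js(symbol="sz000001")
print("rows:", len(tick_df))
print(tick_df.tail())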
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_tick_tx_163.py
|
stock_zh_a_tick_tx
|
(
symbol: str = "sz000001", trade_date: str = "20210316"
)
|
return temp_df
|
http://gu.qq.com/sz000001/gp/detail
成交明细-每个交易日 16:00 提供当日数据
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
|
http://gu.qq.com/sz000001/gp/detail
成交明细-每个交易日 16:00 提供当日数据
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
| 71 | 94 |
def stock_zh_a_tick_tx(
symbol: str = "sz000001", trade_date: str = "20210316"
) -> pd.DataFrame:
"""
http://gu.qq.com/sz000001/gp/detail
成交明细-每个交易日 16:00 提供当日数据
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
"""
url = "http://stock.gtimg.cn/data/index.php"
params = {
"appn": "detail",
"action": "download",
"c": symbol,
"d": trade_date,
}
r = requests.get(url, params=params)
r.encoding = "gbk"
temp_df = pd.read_table(StringIO(r.text))
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_tick_tx_163.py#L71-L94
| 25 |
[
0
] | 4.166667 |
[
13,
14,
20,
21,
22,
23
] | 25 | false | 12.5 | 24 | 1 | 75 | 8 |
def stock_zh_a_tick_tx(
symbol: str = "sz000001", trade_date: str = "20210316"
) -> pd.DataFrame:
url = "http://stock.gtimg.cn/data/index.php"
params = {
"appn": "detail",
"action": "download",
"c": symbol,
"d": trade_date,
}
r = requests.get(url, params=params)
r.encoding = "gbk"
temp_df = pd.read_table(StringIO(r.text))
return temp_df
| 18,811 |
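Sketch for stock_zh_a_tick_tx; unlike the paging variant above it downloads one day's detail file in a single request, so trade_date must name a specific session in YYYYMMDD form:

from akshare.stock.stock_zh_a_tick_tx_163 import stock_zh_a_tick_tx

day_df = stock_zh_a_tick_tx(symbol="sz000001", trade_date="20210316")
print(day_df.head())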
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_tick_tx_163.py
|
stock_zh_a_tick_163
|
(
symbol: str = "sz000001", trade_date: str = "20220429"
)
|
return temp_df
|
成交明细-每个交易日 22:00 提供当日数据; 该接口目前还不支持北交所的股票
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
|
成交明细-每个交易日 22:00 提供当日数据; 该接口目前还不支持北交所的股票
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
| 97 | 127 |
def stock_zh_a_tick_163(
symbol: str = "sz000001", trade_date: str = "20220429"
) -> pd.DataFrame:
"""
成交明细-每个交易日 22:00 提供当日数据; 该接口目前还不支持北交所的股票
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:param trade_date: 需要提取数据的日期
:type trade_date: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
"""
name_code_map = {"sh": "0", "sz": "1"}
url = f"http://quotes.money.163.com/cjmx/{trade_date[:4]}/{trade_date}/{name_code_map[symbol[:2]]}{symbol[2:]}.xls"
r = requests.get(url)
r.encoding = "utf-8"
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df.columns = [
"时间",
"成交价",
"价格变动",
"成交量",
"成交额",
"性质",
]
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['价格变动'] = pd.to_numeric(temp_df['价格变动'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_tick_tx_163.py#L97-L127
| 25 |
[
0
] | 3.225806 |
[
13,
14,
15,
16,
17,
18,
26,
27,
28,
29,
30
] | 35.483871 | false | 12.5 | 31 | 1 | 64.516129 | 8 |
def stock_zh_a_tick_163(
symbol: str = "sz000001", trade_date: str = "20220429"
) -> pd.DataFrame:
name_code_map = {"sh": "0", "sz": "1"}
url = f"http://quotes.money.163.com/cjmx/{trade_date[:4]}/{trade_date}/{name_code_map[symbol[:2]]}{symbol[2:]}.xls"
r = requests.get(url)
r.encoding = "utf-8"
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df.columns = [
"时间",
"成交价",
"价格变动",
"成交量",
"成交额",
"性质",
]
temp_df['成交价'] = pd.to_numeric(temp_df['成交价'])
temp_df['价格变动'] = pd.to_numeric(temp_df['价格变动'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'])
return temp_df
| 18,812 |
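Sketch for stock_zh_a_tick_163; the NetEase download is a legacy GBK .xls, so xlrd must be installed for the engine="xlrd" read, and per the docstring Beijing Stock Exchange codes are not supported:

from akshare.stock.stock_zh_a_tick_tx_163 import stock_zh_a_tick_163

ntes_df = stock_zh_a_tick_163(symbol="sz000001", trade_date="20220429")
print(ntes_df.head())  # 时间 / 成交价 / 价格变动 / 成交量 / 成交额 / 性质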
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_tick_tx_163.py
|
stock_zh_a_tick_163_now
|
(symbol: str = "000001")
|
return big_df
|
成交明细-收盘后获取, 补充 stock_zh_a_tick_163 接口, 用来尽快获取数据
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
|
成交明细-收盘后获取, 补充 stock_zh_a_tick_163 接口, 用来尽快获取数据
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
| 130 | 187 |
def stock_zh_a_tick_163_now(symbol: str = "000001") -> pd.DataFrame:
"""
成交明细-收盘后获取, 补充 stock_zh_a_tick_163 接口, 用来尽快获取数据
http://quotes.money.163.com/trade/cjmx_000001.html#01b05
:param symbol: 带市场标识的股票代码
:type symbol: str
:return: 返回当日股票成交明细的数据
:rtype: pandas.DataFrame
"""
time_list_one = [
item.isoformat().split("T")[1]
for item in pd.date_range("09:30:00", "11:30:00", freq="5min").tolist()
][1:]
time_list_two = [
item.isoformat().split("T")[1]
for item in pd.date_range("13:00:00", "15:00:00", freq="5min").tolist()
][1:]
time_list_one.extend(time_list_two)
big_df = pd.DataFrame()
for item in tqdm(time_list_one):
url = "http://quotes.money.163.com/service/zhubi_ajax.html"
params = {"symbol": symbol, "end": item}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json['zhubi_list']:
break
temp_df = pd.DataFrame(data_json["zhubi_list"])
del temp_df["_id"]
del temp_df["TRADE_TYPE"]
del temp_df["DATE"]
temp_df.reset_index(inplace=True)
temp_df.sort_values(
by="index", ascending=False, ignore_index=True, inplace=True
)
big_df = pd.concat([big_df,temp_df], ignore_index=True)
del big_df["index"]
big_df.columns = [
"_",
"成交量",
"成交价",
"成交额",
"价格变动",
"成交时间",
"性质",
]
big_df = big_df[
[
"成交时间",
"成交价",
"价格变动",
"成交量",
"成交额",
"性质",
]
]
big_df["成交量"] = big_df["成交量"] / 100
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_tick_tx_163.py#L130-L187
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 15.517241 |
[
9,
13,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
34,
36,
37,
46,
56,
57
] | 39.655172 | false | 12.5 | 58 | 5 | 60.344828 | 6 |
def stock_zh_a_tick_163_now(symbol: str = "000001") -> pd.DataFrame:
time_list_one = [
item.isoformat().split("T")[1]
for item in pd.date_range("09:30:00", "11:30:00", freq="5min").tolist()
][1:]
time_list_two = [
item.isoformat().split("T")[1]
for item in pd.date_range("13:00:00", "15:00:00", freq="5min").tolist()
][1:]
time_list_one.extend(time_list_two)
big_df = pd.DataFrame()
for item in tqdm(time_list_one):
url = "http://quotes.money.163.com/service/zhubi_ajax.html"
params = {"symbol": symbol, "end": item}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json['zhubi_list']:
break
temp_df = pd.DataFrame(data_json["zhubi_list"])
del temp_df["_id"]
del temp_df["TRADE_TYPE"]
del temp_df["DATE"]
temp_df.reset_index(inplace=True)
temp_df.sort_values(
by="index", ascending=False, ignore_index=True, inplace=True
)
big_df = pd.concat([big_df,temp_df], ignore_index=True)
del big_df["index"]
big_df.columns = [
"_",
"成交量",
"成交价",
"成交额",
"价格变动",
"成交时间",
"性质",
]
big_df = big_df[
[
"成交时间",
"成交价",
"价格变动",
"成交量",
"成交额",
"性质",
]
]
big_df["成交量"] = big_df["成交量"] / 100
return big_df
| 18,813 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund_hold.py
|
stock_report_fund_hold
|
(
symbol: str = "基金持仓", date: str = "20210331"
)
|
return big_df
|
东方财富网-数据中心-主力数据-基金持仓
http://data.eastmoney.com/zlsj/2020-12-31-1-2.html
:param symbol: choice of {"基金持仓", "QFII持仓", "社保持仓", "券商持仓", "保险持仓", "信托持仓"}
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓数据
:rtype: pandas.DataFrame
|
东方财富网-数据中心-主力数据-基金持仓
http://data.eastmoney.com/zlsj/2020-12-31-1-2.html
:param symbol: choice of {"基金持仓", "QFII持仓", "社保持仓", "券商持仓", "保险持仓", "信托持仓"}
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓数据
:rtype: pandas.DataFrame
| 14 | 108 |
def stock_report_fund_hold(
symbol: str = "基金持仓", date: str = "20210331"
) -> pd.DataFrame:
"""
东方财富网-数据中心-主力数据-基金持仓
http://data.eastmoney.com/zlsj/2020-12-31-1-2.html
:param symbol: choice of {"基金持仓", "QFII持仓", "社保持仓", "券商持仓", "保险持仓", "信托持仓"}
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓数据
:rtype: pandas.DataFrame
"""
symbol_map = {
"基金持仓": "1",
"QFII持仓": "2",
"社保持仓": "3",
"券商持仓": "4",
"保险持仓": "5",
"信托持仓": "6",
}
date = "-".join([date[:4], date[4:6], date[6:]])
url = "http://data.eastmoney.com/dataapi/zlsj/list"
params = {
"date": date,
"type": symbol_map[symbol],
"zjc": "0",
"sortField": "HOULD_NUM",
"sortDirec": "1",
"pageNum": "1",
"pageSize": "500",
"p": "1",
"pageNo": "1",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["pages"]
big_df = pd.DataFrame()
for page in range(1, total_page + 1):
params = {
"date": date,
"type": symbol_map[symbol],
"zjc": "0",
"sortField": "HOULD_NUM",
"sortDirec": "1",
"pageNum": page,
"pageSize": "500",
"p": page,
"pageNo": page,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
big_df.columns = [
"序号",
"_",
"股票简称",
"_",
"_",
"持有基金家数",
"持股总数",
"持股市值",
"_",
"持股变化",
"持股变动数值",
"持股变动比例",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"股票代码",
"_",
"_",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"持有基金家数",
"持股总数",
"持股市值",
"持股变化",
"持股变动数值",
"持股变动比例",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund_hold.py#L14-L108
| 25 |
[
0
] | 1.052632 |
[
13,
21,
22,
23,
34,
35,
36,
37,
38,
39,
50,
51,
52,
53,
54,
55,
56,
81,
94
] | 20 | false | 12.727273 | 95 | 2 | 80 | 8 |
def stock_report_fund_hold(
symbol: str = "基金持仓", date: str = "20210331"
) -> pd.DataFrame:
symbol_map = {
"基金持仓": "1",
"QFII持仓": "2",
"社保持仓": "3",
"券商持仓": "4",
"保险持仓": "5",
"信托持仓": "6",
}
date = "-".join([date[:4], date[4:6], date[6:]])
url = "http://data.eastmoney.com/dataapi/zlsj/list"
params = {
"date": date,
"type": symbol_map[symbol],
"zjc": "0",
"sortField": "HOULD_NUM",
"sortDirec": "1",
"pageNum": "1",
"pageSize": "500",
"p": "1",
"pageNo": "1",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["pages"]
big_df = pd.DataFrame()
for page in range(1, total_page + 1):
params = {
"date": date,
"type": symbol_map[symbol],
"zjc": "0",
"sortField": "HOULD_NUM",
"sortDirec": "1",
"pageNum": page,
"pageSize": "500",
"p": page,
"pageNo": page,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
big_df.columns = [
"序号",
"_",
"股票简称",
"_",
"_",
"持有基金家数",
"持股总数",
"持股市值",
"_",
"持股变化",
"持股变动数值",
"持股变动比例",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"股票代码",
"_",
"_",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"持有基金家数",
"持股总数",
"持股市值",
"持股变化",
"持股变动数值",
"持股变动比例",
]
]
return big_df
| 18,814 |
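Sketch for stock_report_fund_hold; symbol is one of the six holder types mapped in the record above and date must be a quarter end (03-31, 06-30, 09-30 or 12-31) written as YYYYMMDD:

from akshare.stock.stock_fund_hold import stock_report_fund_hold

hold_df = stock_report_fund_hold(symbol="QFII持仓", date="20210331")
print(hold_df.head())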
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_fund_hold.py
|
stock_report_fund_hold_detail
|
(
symbol: str = "008286", date: str = "20220331"
)
|
return temp_df
|
东方财富网-数据中心-主力数据-基金持仓-明细
http://data.eastmoney.com/zlsj/ccjj/2020-12-31-008286.html
:param symbol: 基金代码
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓-明细数据
:rtype: pandas.DataFrame
|
东方财富网-数据中心-主力数据-基金持仓-明细
http://data.eastmoney.com/zlsj/ccjj/2020-12-31-008286.html
:param symbol: 基金代码
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓-明细数据
:rtype: pandas.DataFrame
| 111 | 179 |
def stock_report_fund_hold_detail(
symbol: str = "008286", date: str = "20220331"
) -> pd.DataFrame:
"""
东方财富网-数据中心-主力数据-基金持仓-明细
http://data.eastmoney.com/zlsj/ccjj/2020-12-31-008286.html
:param symbol: 基金代码
:type symbol: str
:param date: 财报发布日期, xxxx-03-31, xxxx-06-30, xxxx-09-30, xxxx-12-31
:type date: str
:return: 基金持仓-明细数据
:rtype: pandas.DataFrame
"""
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "SECURITY_CODE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_MAINDATA_MAIN_POSITIONDETAILS",
"columns": "ALL",
"quoteColumns": "",
"filter": f"""(HOLDER_CODE="{symbol}")(REPORT_DATE='{date}')""",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"序号",
"-",
"股票代码",
"-",
"股票简称",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"持股数",
"持股市值",
"占总股本比例",
"占流通股本比例",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"股票代码",
"股票简称",
"持股数",
"持股市值",
"占总股本比例",
"占流通股本比例",
]
]
temp_df["持股数"] = pd.to_numeric(temp_df["持股数"])
temp_df["持股市值"] = pd.to_numeric(temp_df["持股市值"])
temp_df["占总股本比例"] = pd.to_numeric(temp_df["占总股本比例"])
temp_df["占流通股本比例"] = pd.to_numeric(temp_df["占流通股本比例"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_fund_hold.py#L111-L179
| 25 |
[
0
] | 1.449275 |
[
13,
14,
15,
27,
28,
29,
30,
31,
32,
53,
64,
65,
66,
67,
68
] | 21.73913 | false | 12.727273 | 69 | 1 | 78.26087 | 8 |
def stock_report_fund_hold_detail(
symbol: str = "008286", date: str = "20220331"
) -> pd.DataFrame:
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "SECURITY_CODE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_MAINDATA_MAIN_POSITIONDETAILS",
"columns": "ALL",
"quoteColumns": "",
"filter": f"""(HOLDER_CODE="{symbol}")(REPORT_DATE='{date}')""",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"序号",
"-",
"股票代码",
"-",
"股票简称",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"持股数",
"持股市值",
"占总股本比例",
"占流通股本比例",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"股票代码",
"股票简称",
"持股数",
"持股市值",
"占总股本比例",
"占流通股本比例",
]
]
temp_df["持股数"] = pd.to_numeric(temp_df["持股数"])
temp_df["持股市值"] = pd.to_numeric(temp_df["持股市值"])
temp_df["占总股本比例"] = pd.to_numeric(temp_df["占总股本比例"])
temp_df["占流通股本比例"] = pd.to_numeric(temp_df["占流通股本比例"])
return temp_df
| 18,815 |
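Sketch for stock_report_fund_hold_detail; here symbol is a fund code rather than a holder type, with the same quarter-end date convention as the aggregate interface above:

from akshare.stock.stock_fund_hold import stock_report_fund_hold_detail

detail_df = stock_report_fund_hold_detail(symbol="008286", date="20220331")
print(detail_df[["股票代码", "股票简称", "持股数", "持股市值"]].head())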
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_zrbg_hx.py
|
stock_zh_a_scr_report
|
(year: str = "2018", need_page: str = "1")
|
return big_df
|
和讯财经-上市公司社会责任报告, 从 2010- 年至今
因为股票数量大, 所以获取某年需要遍历所有页
http://stockdata.stock.hexun.com/zrbg/Plate.aspx#
:param year: 报告年份
:type year: str
:param need_page: 需要获取的天数
:type need_page: str
:return: 上市公司社会责任报告数据
:rtype: pandas.DataFrame
|
和讯财经-上市公司社会责任报告, 从 2010- 年至今
因为股票数量大, 所以获取某年需要遍历所有页
http://stockdata.stock.hexun.com/zrbg/Plate.aspx#
:param year: 报告年份
:type year: str
:param need_page: 需要获取的天数
:type need_page: str
:return: 上市公司社会责任报告数据
:rtype: pandas.DataFrame
| 16 | 86 |
def stock_zh_a_scr_report(year: str = "2018", need_page: str = "1") -> pd.DataFrame:
"""
和讯财经-上市公司社会责任报告, 从 2010- 年至今
因为股票数量大, 所以获取某年需要遍历所有页
http://stockdata.stock.hexun.com/zrbg/Plate.aspx#
:param year: 报告年份
:type year: str
:param need_page: 需要获取的天数
:type need_page: str
:return: 上市公司社会责任报告数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
for page in tqdm(range(1, int(need_page)+1)):
hx_params_copy = hx_params.copy()
hx_params_copy.update({"date": "{}-12-31".format(year)})
hx_params_copy.update({"page": page})
r = requests.get(hx_url, headers=hx_headers, params=hx_params_copy)
data_text = r.text
temp_df = data_text[data_text.find("(") + 1: data_text.rfind(")")]
py_obj = demjson.decode(temp_df)
industry = [item["industry"] for item in py_obj["list"]]
stock_number = [item["stockNumber"] for item in py_obj["list"]]
industry_rate = [item["industryrate"] for item in py_obj["list"]]
price_limit = [item["Pricelimit"] for item in py_obj["list"]]
looting_chips = [item["lootingchips"] for item in py_obj["list"]]
r_scramble = [item["rscramble"] for item in py_obj["list"]]
strong_stock = [item["Strongstock"] for item in py_obj["list"]]
s_cramble = [item["Scramble"] for item in py_obj["list"]]
temp_df = pd.DataFrame(
[
industry,
stock_number,
industry_rate,
price_limit,
looting_chips,
r_scramble,
strong_stock,
s_cramble,
],
index=["股票名称", "股东责任", "总得分", "等级", "员工责任", "环境责任", "社会责任", "供应商、客户和消费者权益责任"],
).T
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.rename({"index": "序号"}, axis="columns", inplace=True)
big_df["股票代码"] = (
big_df["股票名称"].str.split("(", expand=True).iloc[:, 1].str.strip(")")
)
big_df["股票名称"] = big_df["股票名称"].str.split("(", expand=True).iloc[:, 0]
big_df["股东责任"] = pd.to_numeric(big_df["股东责任"])
big_df["总得分"] = pd.to_numeric(big_df["总得分"])
big_df["员工责任"] = pd.to_numeric(big_df["员工责任"])
big_df["环境责任"] = pd.to_numeric(big_df["环境责任"])
big_df["社会责任"] = pd.to_numeric(big_df["社会责任"])
big_df["供应商、客户和消费者权益责任"] = pd.to_numeric(big_df["供应商、客户和消费者权益责任"])
big_df = big_df[
[
"序号",
"股票名称",
"股票代码",
"总得分",
"等级",
"股东责任",
"员工责任",
"供应商、客户和消费者权益责任",
"环境责任",
"社会责任",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_zrbg_hx.py#L16-L86
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 16.901408 |
[
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
42,
43,
44,
45,
46,
49,
50,
51,
52,
53,
54,
55,
56,
70
] | 45.070423 | false | 19.047619 | 71 | 10 | 54.929577 | 9 |
def stock_zh_a_scr_report(year: str = "2018", need_page: str = "1") -> pd.DataFrame:
big_df = pd.DataFrame()
for page in tqdm(range(1, int(need_page)+1)):
hx_params_copy = hx_params.copy()
hx_params_copy.update({"date": "{}-12-31".format(year)})
hx_params_copy.update({"page": page})
r = requests.get(hx_url, headers=hx_headers, params=hx_params_copy)
data_text = r.text
temp_df = data_text[data_text.find("(") + 1: data_text.rfind(")")]
py_obj = demjson.decode(temp_df)
industry = [item["industry"] for item in py_obj["list"]]
stock_number = [item["stockNumber"] for item in py_obj["list"]]
industry_rate = [item["industryrate"] for item in py_obj["list"]]
price_limit = [item["Pricelimit"] for item in py_obj["list"]]
looting_chips = [item["lootingchips"] for item in py_obj["list"]]
r_scramble = [item["rscramble"] for item in py_obj["list"]]
strong_stock = [item["Strongstock"] for item in py_obj["list"]]
s_cramble = [item["Scramble"] for item in py_obj["list"]]
temp_df = pd.DataFrame(
[
industry,
stock_number,
industry_rate,
price_limit,
looting_chips,
r_scramble,
strong_stock,
s_cramble,
],
index=["股票名称", "股东责任", "总得分", "等级", "员工责任", "环境责任", "社会责任", "供应商、客户和消费者权益责任"],
).T
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.rename({"index": "序号"}, axis="columns", inplace=True)
big_df["股票代码"] = (
big_df["股票名称"].str.split("(", expand=True).iloc[:, 1].str.strip(")")
)
big_df["股票名称"] = big_df["股票名称"].str.split("(", expand=True).iloc[:, 0]
big_df["股东责任"] = pd.to_numeric(big_df["股东责任"])
big_df["总得分"] = pd.to_numeric(big_df["总得分"])
big_df["员工责任"] = pd.to_numeric(big_df["员工责任"])
big_df["环境责任"] = pd.to_numeric(big_df["环境责任"])
big_df["社会责任"] = pd.to_numeric(big_df["社会责任"])
big_df["供应商、客户和消费者权益责任"] = pd.to_numeric(big_df["供应商、客户和消费者权益责任"])
big_df = big_df[
[
"序号",
"股票名称",
"股票代码",
"总得分",
"等级",
"股东责任",
"员工责任",
"供应商、客户和消费者权益责任",
"环境责任",
"社会责任",
]
]
return big_df
| 18,816 |
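Sketch for stock_zh_a_scr_report; need_page controls how many Hexun result pages are pulled, and like the KCB report earlier the loop relies on the removed DataFrame.append, so an older pandas is assumed:

from akshare.stock.stock_zh_zrbg_hx import stock_zh_a_scr_report

scr_df = stock_zh_a_scr_report(year="2018", need_page="2")
print(scr_df[["股票名称", "股票代码", "总得分", "等级"]].head())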
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_sz_name_code
|
(indicator: str = "A股列表") -> pd
|
深圳证券交易所-股票列表
http://www.szse.cn/market/product/stock/list/index.html
:param indicator: choice of {"A股列表", "B股列表", "CDR列表", "AB股列表"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
|
深圳证券交易所-股票列表
http://www.szse.cn/market/product/stock/list/index.html
:param indicator: choice of {"A股列表", "B股列表", "CDR列表", "AB股列表"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
| 17 | 115 |
def stock_info_sz_name_code(indicator: str = "A股列表") -> pd.DataFrame:
"""
深圳证券交易所-股票列表
http://www.szse.cn/market/product/stock/list/index.html
:param indicator: choice of {"A股列表", "B股列表", "CDR列表", "AB股列表"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
indicator_map = {
"A股列表": "tab1",
"B股列表": "tab2",
"CDR列表": "tab3",
"AB股列表": "tab4",
}
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1110",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
if len(temp_df) > 10:
if indicator == "A股列表":
temp_df["A股代码"] = (
temp_df["A股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"A股代码",
"A股简称",
"A股上市日期",
"A股总股本",
"A股流通股本",
"所属行业",
]
]
elif indicator == "B股列表":
temp_df["B股代码"] = (
temp_df["B股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"B股代码",
"B股简称",
"B股上市日期",
"B股总股本",
"B股流通股本",
"所属行业",
]
]
elif indicator == "AB股列表":
temp_df["A股代码"] = (
temp_df["A股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df["B股代码"] = (
temp_df["B股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"A股代码",
"A股简称",
"A股上市日期",
"B股代码",
"B股简称",
"B股上市日期",
"所属行业",
]
]
return temp_df
else:
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L17-L115
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 9.090909 |
[
9,
10,
16,
22,
23,
24,
25,
26,
27,
28,
36,
47,
48,
56,
67,
68,
76,
84,
96,
98
] | 20.20202 | false | 11.538462 | 99 | 6 | 79.79798 | 6 |
def stock_info_sz_name_code(indicator: str = "A股列表") -> pd.DataFrame:
url = "http://www.szse.cn/api/report/ShowReport"
indicator_map = {
"A股列表": "tab1",
"B股列表": "tab2",
"CDR列表": "tab3",
"AB股列表": "tab4",
}
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1110",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
if len(temp_df) > 10:
if indicator == "A股列表":
temp_df["A股代码"] = (
temp_df["A股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"A股代码",
"A股简称",
"A股上市日期",
"A股总股本",
"A股流通股本",
"所属行业",
]
]
elif indicator == "B股列表":
temp_df["B股代码"] = (
temp_df["B股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"B股代码",
"B股简称",
"B股上市日期",
"B股总股本",
"B股流通股本",
"所属行业",
]
]
elif indicator == "AB股列表":
temp_df["A股代码"] = (
temp_df["A股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df["B股代码"] = (
temp_df["B股代码"]
.astype(str)
.str.split(".", expand=True)
.iloc[:, 0]
.str.zfill(6)
.str.replace("000nan", "")
)
temp_df = temp_df[
[
"板块",
"A股代码",
"A股简称",
"A股上市日期",
"B股代码",
"B股简称",
"B股上市日期",
"所属行业",
]
]
return temp_df
else:
return temp_df
| 18,817 |
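A brief usage sketch for the record above. The import path follows the record's path field (akshare/stock/stock_info.py); the call performs a live request to the SZSE report endpoint, so the result depends on the site being reachable.

from akshare.stock.stock_info import stock_info_sz_name_code

# A-share listing; the code column is already zero-padded to six digits by the function
sz_a_df = stock_info_sz_name_code(indicator="A股列表")
print(sz_a_df[["A股代码", "A股简称", "A股上市日期"]].head())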
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_sh_name_code
|
(indicator: str = "主板A股") -> pd
|
return temp_df
|
上海证券交易所-股票列表
http://www.sse.com.cn/assortment/stock/list/share/
:param indicator: choice of {"主板A股": "1", "主板B股": "2", "科创板": "8"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
|
上海证券交易所-股票列表
http://www.sse.com.cn/assortment/stock/list/share/
:param indicator: choice of {"主板A股": "1", "主板B股": "2", "科创板": "8"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
| 118 | 187 |
def stock_info_sh_name_code(indicator: str = "主板A股") -> pd.DataFrame:
"""
上海证券交易所-股票列表
http://www.sse.com.cn/assortment/stock/list/share/
:param indicator: choice of {"主板A股": "1", "主板B股": "2", "科创板": "8"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"主板A股": "1", "主板B股": "2", "科创板": "8"}
url = "http://query.sse.com.cn/sseQuery/commonQuery.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"STOCK_TYPE": indicator_map[indicator],
"REG_PROVINCE": "",
"CSRC_CODE": "",
"STOCK_CODE": "",
"sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GP_L",
"COMPANY_STATUS": "2,4,5,7,8",
"type": "inParams",
"isPagination": "true",
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "10000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "1",
"_": "1653291270045",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
columns = [
"-",
"-",
"-",
"-",
"-",
"-",
"证券简称",
"扩位证券简称",
"-",
"上市日期",
"-",
"-",
"-",
]
# column index 3=A_STOCK_CODE, 8=B_STOCK_CODE, 11=COMPANY_CODE
if indicator == "主板B股":
columns[8] = "证券代码"
else:
columns[3] = "证券代码"
temp_df.columns = columns
temp_df = temp_df[
[
"证券代码",
"证券简称",
"扩位证券简称",
"上市日期",
]
]
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L118-L187
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 12.857143 |
[
9,
10,
11,
17,
33,
34,
35,
36,
53,
54,
56,
58,
60,
68,
69
] | 21.428571 | false | 11.538462 | 70 | 2 | 78.571429 | 6 |
def stock_info_sh_name_code(indicator: str = "主板A股") -> pd.DataFrame:
indicator_map = {"主板A股": "1", "主板B股": "2", "科创板": "8"}
url = "http://query.sse.com.cn/sseQuery/commonQuery.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"STOCK_TYPE": indicator_map[indicator],
"REG_PROVINCE": "",
"CSRC_CODE": "",
"STOCK_CODE": "",
"sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GP_L",
"COMPANY_STATUS": "2,4,5,7,8",
"type": "inParams",
"isPagination": "true",
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "10000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "1",
"_": "1653291270045",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
columns = [
"-",
"-",
"-",
"-",
"-",
"-",
"证券简称",
"扩位证券简称",
"-",
"上市日期",
"-",
"-",
"-",
]
# column index 3=A_STOCK_CODE, 8=B_STOCK_CODE, 11=COMPANY_CODE
if indicator == "主板B股":
columns[8] = "证券代码"
else:
columns[3] = "证券代码"
temp_df.columns = columns
temp_df = temp_df[
[
"证券代码",
"证券简称",
"扩位证券简称",
"上市日期",
]
]
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
return temp_df
| 18,818 |
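A usage sketch under the same assumptions (module path taken from the record, live SSE endpoint required).

from akshare.stock.stock_info import stock_info_sh_name_code

# indicator accepts "主板A股", "主板B股" or "科创板"
kcb_df = stock_info_sh_name_code(indicator="科创板")
print(kcb_df[["证券代码", "证券简称", "上市日期"]].head())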
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_bj_name_code
|
()
|
return big_df
|
北京证券交易所-股票列表
https://www.bse.cn/nq/listedcompany.html
:return: 股票列表
:rtype: pandas.DataFrame
|
北京证券交易所-股票列表
https://www.bse.cn/nq/listedcompany.html
:return: 股票列表
:rtype: pandas.DataFrame
| 190 | 283 |
def stock_info_bj_name_code() -> pd.DataFrame:
"""
北京证券交易所-股票列表
https://www.bse.cn/nq/listedcompany.html
:return: 股票列表
:rtype: pandas.DataFrame
"""
url = "https://www.bse.cn/nqxxController/nqxxCnzq.do"
payload = {
"page": "0",
"typejb": "T",
"xxfcbj[]": "2",
"xxzqdm": "",
"sortfield": "xxzqdm",
"sorttype": "asc",
}
r = requests.post(url, data=payload)
data_text = r.text
data_json = json.loads(data_text[data_text.find("[") : -1])
total_page = data_json[0]["totalPages"]
big_df = pd.DataFrame()
for page in tqdm(range(total_page), leave=False):
payload.update({"page": page})
r = requests.post(url, data=payload)
data_text = r.text
data_json = json.loads(data_text[data_text.find("[") : -1])
temp_df = data_json[0]["content"]
temp_df = pd.DataFrame(temp_df)
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"上市日期",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"流通股本",
"-",
"-",
"-",
"-",
"-",
"所属行业",
"-",
"-",
"-",
"-",
"报告日期",
"-",
"-",
"-",
"-",
"-",
"-",
"地区",
"-",
"-",
"-",
"-",
"-",
"券商",
"总股本",
"-",
"证券代码",
"-",
"证券简称",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
big_df = big_df[
[
"证券代码",
"证券简称",
"总股本",
"流通股本",
"上市日期",
"所属行业",
"地区",
"报告日期",
]
]
big_df["报告日期"] = pd.to_datetime(big_df["报告日期"]).dt.date
big_df["上市日期"] = pd.to_datetime(big_df["上市日期"]).dt.date
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L190-L283
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 7.446809 |
[
7,
8,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
79,
91,
92,
93
] | 21.276596 | false | 11.538462 | 94 | 2 | 78.723404 | 4 |
def stock_info_bj_name_code() -> pd.DataFrame:
url = "https://www.bse.cn/nqxxController/nqxxCnzq.do"
payload = {
"page": "0",
"typejb": "T",
"xxfcbj[]": "2",
"xxzqdm": "",
"sortfield": "xxzqdm",
"sorttype": "asc",
}
r = requests.post(url, data=payload)
data_text = r.text
data_json = json.loads(data_text[data_text.find("[") : -1])
total_page = data_json[0]["totalPages"]
big_df = pd.DataFrame()
for page in tqdm(range(total_page), leave=False):
payload.update({"page": page})
r = requests.post(url, data=payload)
data_text = r.text
data_json = json.loads(data_text[data_text.find("[") : -1])
temp_df = data_json[0]["content"]
temp_df = pd.DataFrame(temp_df)
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"上市日期",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"流通股本",
"-",
"-",
"-",
"-",
"-",
"所属行业",
"-",
"-",
"-",
"-",
"报告日期",
"-",
"-",
"-",
"-",
"-",
"-",
"地区",
"-",
"-",
"-",
"-",
"-",
"券商",
"总股本",
"-",
"证券代码",
"-",
"证券简称",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
big_df = big_df[
[
"证券代码",
"证券简称",
"总股本",
"流通股本",
"上市日期",
"所属行业",
"地区",
"报告日期",
]
]
big_df["报告日期"] = pd.to_datetime(big_df["报告日期"]).dt.date
big_df["上市日期"] = pd.to_datetime(big_df["上市日期"]).dt.date
return big_df
| 18,819 |
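The BSE listing takes no arguments; a sketch of a typical call. It pages through the BSE endpoint with tqdm, so it may take a few seconds and needs network access.

from akshare.stock.stock_info import stock_info_bj_name_code

bse_df = stock_info_bj_name_code()
print(len(bse_df), "listed companies")
print(bse_df[["证券代码", "证券简称", "上市日期"]].head())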
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_sh_delist
|
()
|
return temp_df
|
上海证券交易所-终止上市公司
http://www.sse.com.cn/assortment/stock/list/delisting/
:return: 终止上市公司
:rtype: pandas.DataFrame
|
上海证券交易所-终止上市公司
http://www.sse.com.cn/assortment/stock/list/delisting/
:return: 终止上市公司
:rtype: pandas.DataFrame
| 286 | 349 |
def stock_info_sh_delist() -> pd.DataFrame:
"""
上海证券交易所-终止上市公司
http://www.sse.com.cn/assortment/stock/list/delisting/
:return: 终止上市公司
:rtype: pandas.DataFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
}
params = {
"sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GP_L",
"isPagination": "true",
"STOCK_CODE": "",
"CSRC_CODE": "",
"REG_PROVINCE": "",
"STOCK_TYPE": "1,2",
"COMPANY_STATUS": "3",
"type": "inParams",
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "500",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "1",
"_": "1643035608183",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df.columns = [
"-",
"-",
"公司简称",
"-",
"暂停上市日期",
"-",
"-",
"-",
"-",
"上市日期",
"-",
"公司代码",
"-",
]
temp_df = temp_df[
[
"公司代码",
"公司简称",
"上市日期",
"暂停上市日期",
]
]
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
temp_df["暂停上市日期"] = pd.to_datetime(temp_df["暂停上市日期"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L286-L349
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 10.9375 |
[
7,
8,
19,
35,
36,
37,
38,
53,
61,
62,
63
] | 17.1875 | false | 11.538462 | 64 | 1 | 82.8125 | 4 |
def stock_info_sh_delist() -> pd.DataFrame:
url = "http://query.sse.com.cn/commonQuery.do"
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
}
params = {
"sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GP_L",
"isPagination": "true",
"STOCK_CODE": "",
"CSRC_CODE": "",
"REG_PROVINCE": "",
"STOCK_TYPE": "1,2",
"COMPANY_STATUS": "3",
"type": "inParams",
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "500",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "1",
"_": "1643035608183",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df.columns = [
"-",
"-",
"公司简称",
"-",
"暂停上市日期",
"-",
"-",
"-",
"-",
"上市日期",
"-",
"公司代码",
"-",
]
temp_df = temp_df[
[
"公司代码",
"公司简称",
"上市日期",
"暂停上市日期",
]
]
temp_df["上市日期"] = pd.to_datetime(temp_df["上市日期"]).dt.date
temp_df["暂停上市日期"] = pd.to_datetime(temp_df["暂停上市日期"]).dt.date
return temp_df
| 18,820 |
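A sketch showing how the delisting table above might be consumed, for example to look at the most recent suspensions; the column names come from the function body.

from akshare.stock.stock_info import stock_info_sh_delist

delist_df = stock_info_sh_delist()
print(delist_df.sort_values("暂停上市日期").tail())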
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_sz_delist
|
(indicator: str = "暂停上市公司") -> pd.DataF
|
深圳证券交易所-暂停上市公司-终止上市公司
http://www.szse.cn/market/stock/suspend/index.html
:param indicator: choice of {"暂停上市公司", "终止上市公司"}
:type indicator: str
:return: 暂停上市公司 or 终止上市公司 的数据
:rtype: pandas.DataFrame
|
深圳证券交易所-暂停上市公司-终止上市公司
http://www.szse.cn/market/stock/suspend/index.html
:param indicator: choice of {"暂停上市公司", "终止上市公司"}
:type indicator: str
:return: 暂停上市公司 or 终止上市公司 的数据
:rtype: pandas.DataFrame
| 352 | 374 |
def stock_info_sz_delist(indicator: str = "暂停上市公司") -> pd.DataFrame:
"""
    深圳证券交易所-暂停上市公司-终止上市公司
http://www.szse.cn/market/stock/suspend/index.html
:param indicator: choice of {"暂停上市公司", "终止上市公司"}
:type indicator: str
:return: 暂停上市公司 or 终止上市公司 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"暂停上市公司": "tab1", "终止上市公司": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1793_ssgs",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L352-L374
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 39.130435 |
[
9,
10,
11,
17,
18,
19,
20,
21,
22
] | 39.130435 | false | 11.538462 | 23 | 2 | 60.869565 | 6 |
def stock_info_sz_delist(indicator: str = "暂停上市公司") -> pd.DataFrame:
indicator_map = {"暂停上市公司": "tab1", "终止上市公司": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1793_ssgs",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
| 18,821 |
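A sketch iterating over both supported indicator values. The endpoint returns an xlsx payload parsed by pandas.read_excel, so an Excel engine (openpyxl is the usual choice) must be available in the environment.

from akshare.stock.stock_info import stock_info_sz_delist

for indicator in ("暂停上市公司", "终止上市公司"):
    df = stock_info_sz_delist(indicator=indicator)
    print(indicator, len(df))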
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_sz_change_name
|
(indicator: str = "全称变更") -> pd.D
|
深圳证券交易所-更名公司
http://www.szse.cn/market/companys/changename/index.html
:param indicator: choice of {"全称变更": "tab1", "简称变更": "tab2"}
:type indicator: str
:return: 全称变更 or 简称变更 的数据
:rtype: pandas.DataFrame
|
深圳证券交易所-更名公司
http://www.szse.cn/market/companys/changename/index.html
:param indicator: choice of {"全称变更": "tab1", "简称变更": "tab2"}
:type indicator: str
:return: 全称变更 or 简称变更 的数据
:rtype: pandas.DataFrame
| 377 | 399 |
def stock_info_sz_change_name(indicator: str = "全称变更") -> pd.DataFrame:
"""
    深圳证券交易所-更名公司
http://www.szse.cn/market/companys/changename/index.html
:param indicator: choice of {"全称变更": "tab1", "简称变更": "tab2"}
:type indicator: str
:return: 全称变更 or 简称变更 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"全称变更": "tab1", "简称变更": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "SSGSGMXX",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L377-L399
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 39.130435 |
[
9,
10,
11,
17,
18,
19,
20,
21,
22
] | 39.130435 | false | 11.538462 | 23 | 2 | 60.869565 | 6 |
def stock_info_sz_change_name(indicator: str = "全称变更") -> pd.DataFrame:
indicator_map = {"全称变更": "tab1", "简称变更": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "SSGSGMXX",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
| 18,822 |
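The renamed-company interface mirrors the one above; a one-line sketch under the same assumptions.

from akshare.stock.stock_info import stock_info_sz_change_name

renamed_df = stock_info_sz_change_name(indicator="简称变更")
print(renamed_df.head())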
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_change_name
|
(symbol: str = "000503")
|
新浪财经-股票曾用名
http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/300378.phtml
:param symbol: 股票代码
:type symbol: str
:return: 股票曾用名
:rtype: list
|
新浪财经-股票曾用名
http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/300378.phtml
:param symbol: 股票代码
:type symbol: str
:return: 股票曾用名
:rtype: list
| 402 | 427 |
def stock_info_change_name(symbol: str = "000503") -> pd.DataFrame:
"""
新浪财经-股票曾用名
http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/300378.phtml
:param symbol: 股票代码
:type symbol: str
:return: 股票曾用名
:rtype: list
"""
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{symbol}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[3].iloc[:, :2]
temp_df.dropna(inplace=True)
temp_df.columns = ["item", "value"]
temp_df["item"] = temp_df["item"].str.split(":", expand=True)[0]
try:
name_list = (
temp_df[temp_df["item"] == "证券简称更名历史"].value.tolist()[0].split(" ")
)
big_df = pd.DataFrame(name_list)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = ["index", "name"]
return big_df
except IndexError as e:
return pd.DataFrame()
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L402-L427
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 34.615385 |
[
9,
10,
11,
12,
13,
14,
15,
16,
19,
20,
21,
22,
23,
24,
25
] | 57.692308 | false | 11.538462 | 26 | 2 | 42.307692 | 6 |
def stock_info_change_name(symbol: str = "000503") -> pd.DataFrame:
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{symbol}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[3].iloc[:, :2]
temp_df.dropna(inplace=True)
temp_df.columns = ["item", "value"]
temp_df["item"] = temp_df["item"].str.split(":", expand=True)[0]
try:
name_list = (
temp_df[temp_df["item"] == "证券简称更名历史"].value.tolist()[0].split(" ")
)
big_df = pd.DataFrame(name_list)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = ["index", "name"]
return big_df
except IndexError as e:
return pd.DataFrame()
| 18,823 |
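A sketch for the per-symbol name-history lookup. Note that the function returns an empty DataFrame when the Sina page carries no rename history, so the caller should check for that.

from akshare.stock.stock_info import stock_info_change_name

history_df = stock_info_change_name(symbol="000503")
if history_df.empty:
    print("no historical names found for 000503")
else:
    print(history_df)   # columns: index, name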
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info.py
|
stock_info_a_code_name
|
()
|
return big_df
|
沪深京 A 股列表
:return: 沪深京 A 股数据
:rtype: pandas.DataFrame
|
沪深京 A 股列表
:return: 沪深京 A 股数据
:rtype: pandas.DataFrame
| 431 | 457 |
def stock_info_a_code_name() -> pd.DataFrame:
"""
沪深京 A 股列表
:return: 沪深京 A 股数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
stock_sh = stock_info_sh_name_code(indicator="主板A股")
stock_sh = stock_sh[["证券代码", "证券简称"]]
stock_sz = stock_info_sz_name_code(indicator="A股列表")
stock_sz["A股代码"] = stock_sz["A股代码"].astype(str).str.zfill(6)
big_df = pd.concat([big_df, stock_sz[["A股代码", "A股简称"]]], ignore_index=True)
big_df.columns = ["证券代码", "证券简称"]
stock_kcb = stock_info_sh_name_code(indicator="科创板")
stock_kcb = stock_kcb[["证券代码", "证券简称"]]
stock_bse = stock_info_bj_name_code()
stock_bse = stock_bse[["证券代码", "证券简称"]]
stock_bse.columns = ["证券代码", "证券简称"]
big_df = pd.concat([big_df, stock_sh], ignore_index=True)
big_df = pd.concat([big_df, stock_kcb], ignore_index=True)
big_df = pd.concat([big_df, stock_bse], ignore_index=True)
big_df.columns = ["code", "name"]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info.py#L431-L457
| 25 |
[
0,
1,
2,
3,
4,
5
] | 22.222222 |
[
6,
7,
8,
10,
11,
12,
13,
15,
16,
18,
19,
20,
22,
23,
24,
25,
26
] | 62.962963 | false | 11.538462 | 27 | 1 | 37.037037 | 3 |
def stock_info_a_code_name() -> pd.DataFrame:
big_df = pd.DataFrame()
stock_sh = stock_info_sh_name_code(indicator="主板A股")
stock_sh = stock_sh[["证券代码", "证券简称"]]
stock_sz = stock_info_sz_name_code(indicator="A股列表")
stock_sz["A股代码"] = stock_sz["A股代码"].astype(str).str.zfill(6)
big_df = pd.concat([big_df, stock_sz[["A股代码", "A股简称"]]], ignore_index=True)
big_df.columns = ["证券代码", "证券简称"]
stock_kcb = stock_info_sh_name_code(indicator="科创板")
stock_kcb = stock_kcb[["证券代码", "证券简称"]]
stock_bse = stock_info_bj_name_code()
stock_bse = stock_bse[["证券代码", "证券简称"]]
stock_bse.columns = ["证券代码", "证券简称"]
big_df = pd.concat([big_df, stock_sh], ignore_index=True)
big_df = pd.concat([big_df, stock_kcb], ignore_index=True)
big_df = pd.concat([big_df, stock_bse], ignore_index=True)
big_df.columns = ["code", "name"]
return big_df
| 18,824 |
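A sketch building a code-to-name lookup from the aggregated A-share list; the function itself chains the SZSE, SSE, STAR-board and BSE calls above, so it issues several network requests.

from akshare.stock.stock_info import stock_info_a_code_name

all_a_df = stock_info_a_code_name()
code_to_name = dict(zip(all_a_df["code"], all_a_df["name"]))
print(len(code_to_name), code_to_name.get("600030"))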
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_sina.py
|
_get_zh_a_page_count
|
()
|
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 需要采集的股票总页数
:rtype: int
|
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 需要采集的股票总页数
:rtype: int
| 29 | 41 |
def _get_zh_a_page_count() -> int:
"""
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 需要采集的股票总页数
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_sina.py#L29-L41
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 53.846154 |
[
7,
8,
9,
10,
12
] | 38.461538 | false | 6.550218 | 13 | 2 | 61.538462 | 4 |
def _get_zh_a_page_count() -> int:
res = requests.get(zh_sina_a_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
| 18,825 |
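A small offline illustration of the rounding logic above: true division always yields a float, so the isinstance(page_count, int) branch is never taken and the helper effectively returns int(count / 80) + 1. The count used below is purely hypothetical.

total_stocks = 4800                  # hypothetical value parsed from the Sina counter
page_count = total_stocks / 80
print(isinstance(page_count, int))   # False: true division returns a float
print(int(page_count) + 1)           # 61 pages would be requested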
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_sina.py
|
stock_zh_a_spot
|
()
|
return big_df
|
新浪财经-所有 A 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
|
新浪财经-所有 A 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
| 44 | 125 |
def stock_zh_a_spot() -> pd.DataFrame:
"""
新浪财经-所有 A 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(
range(1, page_count + 1), leave=False, desc="Please wait for a moment"
):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url, params=zh_sina_stock_payload_copy
)
data_json = demjson.decode(r.text)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json)], ignore_index=True
)
big_df = big_df.astype(
{
"trade": "float",
"pricechange": "float",
"changepercent": "float",
"buy": "float",
"sell": "float",
"settlement": "float",
"open": "float",
"high": "float",
"low": "float",
"volume": "float",
"amount": "float",
"per": "float",
"pb": "float",
"mktcap": "float",
"nmc": "float",
"turnoverratio": "float",
}
)
big_df.columns = [
"代码",
"_",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"买入",
"卖出",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
"_",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"买入",
"卖出",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_sina.py#L44-L125
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.536585 |
[
7,
8,
9,
10,
13,
14,
17,
18,
22,
42,
64,
81
] | 14.634146 | false | 6.550218 | 82 | 2 | 85.365854 | 4 |
def stock_zh_a_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(
range(1, page_count + 1), leave=False, desc="Please wait for a moment"
):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url, params=zh_sina_stock_payload_copy
)
data_json = demjson.decode(r.text)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json)], ignore_index=True
)
big_df = big_df.astype(
{
"trade": "float",
"pricechange": "float",
"changepercent": "float",
"buy": "float",
"sell": "float",
"settlement": "float",
"open": "float",
"high": "float",
"low": "float",
"volume": "float",
"amount": "float",
"per": "float",
"pb": "float",
"mktcap": "float",
"nmc": "float",
"turnoverratio": "float",
}
)
big_df.columns = [
"代码",
"_",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"买入",
"卖出",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
"_",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"买入",
"卖出",
"昨收",
"今开",
"最高",
"最低",
"成交量",
"成交额",
]
]
return big_df
| 18,826 |
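A usage sketch; as the docstring warns, the paginated crawl is heavy and repeated calls may get the client temporarily blocked by Sina, so the snapshot is best cached rather than polled.

from akshare.stock.stock_zh_a_sina import stock_zh_a_spot

spot_df = stock_zh_a_spot()          # one full-market snapshot
print(spot_df[["代码", "名称", "最新价", "涨跌幅"]].head())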
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_sina.py
|
stock_zh_a_daily
|
(
symbol: str = "sh603843",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
)
|
新浪财经-A 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh603843/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh600000
:type symbol: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
|
新浪财经-A 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh603843/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh600000
:type symbol: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
| 128 | 293 |
def stock_zh_a_daily(
symbol: str = "sh603843",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
) -> pd.DataFrame:
"""
新浪财经-A 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh603843/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh600000
:type symbol: str
    :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
def _fq_factor(method: str) -> pd.DataFrame:
if method == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if hfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
return hfq_factor_df
else:
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if qfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust in ("hfq-factor", "qfq-factor"):
return _fq_factor(adjust.split("-")[0])
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(
r.text[r.text.find("[") : r.text.rfind("]") + 1]
)
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"outstanding_share",
"turnover",
]
if adjust == "":
temp_df = temp_df[start_date:end_date]
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df,
hfq_factor_df,
left_index=True,
right_index=True,
how="outer",
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df,
qfq_factor_df,
left_index=True,
right_index=True,
how="outer",
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_sina.py#L128-L293
| 25 |
[
0
] | 0.60241 |
[
21,
22,
23,
24,
27,
28,
29,
30,
31,
32,
33,
35,
36,
39,
40,
41,
42,
43,
44,
45,
47,
48,
50,
51,
52,
53,
56,
57,
58,
59,
60,
61,
64,
65,
66,
67,
70,
71,
72,
73,
74,
83,
84,
85,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
101,
102,
103,
104,
111,
112,
113,
114,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
131,
132,
133,
136,
137,
138,
140,
147,
148,
149,
150,
153,
154,
155,
156,
157,
158,
159,
160,
161,
162,
163,
164,
165
] | 60.240964 | false | 6.550218 | 166 | 9 | 39.759036 | 12 |
def stock_zh_a_daily(
symbol: str = "sh603843",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
) -> pd.DataFrame:
def _fq_factor(method: str) -> pd.DataFrame:
if method == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if hfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
return hfq_factor_df
else:
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if qfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust in ("hfq-factor", "qfq-factor"):
return _fq_factor(adjust.split("-")[0])
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(
r.text[r.text.find("[") : r.text.rfind("]") + 1]
)
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"outstanding_share",
"turnover",
]
if adjust == "":
temp_df = temp_df[start_date:end_date]
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df,
hfq_factor_df,
left_index=True,
right_index=True,
how="outer",
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df,
qfq_factor_df,
left_index=True,
right_index=True,
how="outer",
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(
subset=["open", "high", "low", "close", "volume"], inplace=True
)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
| 18,827 |
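A sketch of the adjust variants documented above (raw, forward-adjusted, and factor-only). sh600000 is the example symbol from the docstring; any liquid A-share code should behave the same way, assuming the Sina endpoints are reachable.

from akshare.stock.stock_zh_a_sina import stock_zh_a_daily

raw_df = stock_zh_a_daily(symbol="sh600000", start_date="20200101", end_date="20201231")
qfq_df = stock_zh_a_daily(symbol="sh600000", start_date="20200101", end_date="20201231", adjust="qfq")
factors = stock_zh_a_daily(symbol="sh600000", adjust="qfq-factor")   # factor table only
print(raw_df.tail(), qfq_df.tail(), factors.tail(), sep="\n")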
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_sina.py
|
stock_zh_a_cdr_daily
|
(
symbol: str = "sh689009",
start_date: str = "19900101",
end_date: str = "22201116",
)
|
return temp_df
|
新浪财经-A股-CDR个股的历史行情数据, 大量抓取容易封 IP
# TODO 观察复权情况
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh689009
:type symbol: str
:return: specific data
:rtype: pandas.DataFrame
|
新浪财经-A股-CDR个股的历史行情数据, 大量抓取容易封 IP
# TODO 观察复权情况
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh689009
:type symbol: str
:return: specific data
:rtype: pandas.DataFrame
| 296 | 331 |
def stock_zh_a_cdr_daily(
symbol: str = "sh689009",
start_date: str = "19900101",
end_date: str = "22201116",
) -> pd.DataFrame:
"""
新浪财经-A股-CDR个股的历史行情数据, 大量抓取容易封 IP
# TODO 观察复权情况
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh689009
:type symbol: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
temp_df = data_df[start_date:end_date].copy()
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df.reset_index(inplace=True)
temp_df["date"] = temp_df["date"].dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_sina.py#L296-L331
| 25 |
[
0
] | 2.777778 |
[
18,
19,
20,
21,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
] | 44.444444 | false | 6.550218 | 36 | 1 | 55.555556 | 11 |
def stock_zh_a_cdr_daily(
symbol: str = "sh689009",
start_date: str = "19900101",
end_date: str = "22201116",
) -> pd.DataFrame:
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
temp_df = data_df[start_date:end_date].copy()
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df.reset_index(inplace=True)
temp_df["date"] = temp_df["date"].dt.date
return temp_df
| 18,828 |
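A minimal sketch for the CDR variant, which per the docstring returns unadjusted data only.

from akshare.stock.stock_zh_a_sina import stock_zh_a_cdr_daily

cdr_df = stock_zh_a_cdr_daily(symbol="sh689009", start_date="20201103", end_date="20210630")
print(cdr_df.head())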
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_a_sina.py
|
stock_zh_a_minute
|
(
symbol: str = "sh600519", period: str = "1", adjust: str = ""
)
|
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
:param symbol: sh000300
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
|
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
:param symbol: sh000300
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
| 334 | 457 |
def stock_zh_a_minute(
symbol: str = "sh600519", period: str = "1", adjust: str = ""
) -> pd.DataFrame:
"""
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
:param symbol: sh000300
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"ma": "no",
"datalen": "36580",
}
r = requests.get(url, params=params)
data_text = r.text
try:
data_json = json.loads(data_text.split("=(")[1].split(");")[0])
temp_df = pd.DataFrame(data_json).iloc[:, :6]
except:
url = f"https://quotes.sina.cn/cn/api/jsonp_v2.php/var%20_{symbol}_{period}_1658852984203=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"ma": "no",
"datalen": "30000",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text.split("=(")[1].split(");")[0])
temp_df = pd.DataFrame(data_json).iloc[:, :6]
if temp_df.empty:
print(f"{symbol} 股票数据不存在,请检查是否已退市")
return
try:
stock_zh_a_daily(symbol=symbol, adjust="qfq")
except:
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[
[
True if "09:31:00" <= item <= "15:00:00" else False
for item in temp_df["time"]
]
]
need_df.drop_duplicates(subset=["date"], keep="last", inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
stock_zh_a_daily_qfq_df.index = pd.to_datetime(
stock_zh_a_daily_qfq_df["date"]
)
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df) :, :][
"close"
].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(
temp_df, result_df, left_index=True, right_index=True
)
merged_df["open"] = (
merged_df["open"].astype(float) * merged_df["close_y"]
)
merged_df["high"] = (
merged_df["high"].astype(float) * merged_df["close_y"]
)
merged_df["low"] = (
merged_df["low"].astype(float) * merged_df["close_y"]
)
merged_df["close"] = (
merged_df["close_x"].astype(float) * merged_df["close_y"]
)
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[
[
True if "09:31:00" <= item <= "15:00:00" else False
for item in temp_df["time"]
]
]
need_df.drop_duplicates(subset=["date"], keep="last", inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
stock_zh_a_daily_hfq_df.index = pd.to_datetime(
stock_zh_a_daily_hfq_df["date"]
)
result_df = stock_zh_a_daily_hfq_df.iloc[-len(need_df) :, :][
"close"
].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(
temp_df, result_df, left_index=True, right_index=True
)
merged_df["open"] = (
merged_df["open"].astype(float) * merged_df["close_y"]
)
merged_df["high"] = (
merged_df["high"].astype(float) * merged_df["close_y"]
)
merged_df["low"] = (
merged_df["low"].astype(float) * merged_df["close_y"]
)
merged_df["close"] = (
merged_df["close_x"].astype(float) * merged_df["close_y"]
)
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_a_sina.py#L334-L457
| 25 |
[
0
] | 0.806452 |
[
15,
16,
22,
23,
24,
25,
26,
27,
28,
29,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
47,
48,
50,
51,
53,
59,
60,
61,
62,
65,
68,
69,
72,
75,
78,
81,
84,
85,
86,
87,
88,
90,
96,
97,
98,
99,
102,
105,
106,
109,
112,
115,
118,
121,
122,
123
] | 45.967742 | false | 6.550218 | 124 | 9 | 54.032258 | 10 |
def stock_zh_a_minute(
symbol: str = "sh600519", period: str = "1", adjust: str = ""
) -> pd.DataFrame:
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"ma": "no",
"datalen": "36580",
}
r = requests.get(url, params=params)
data_text = r.text
try:
data_json = json.loads(data_text.split("=(")[1].split(");")[0])
temp_df = pd.DataFrame(data_json).iloc[:, :6]
except:
url = f"https://quotes.sina.cn/cn/api/jsonp_v2.php/var%20_{symbol}_{period}_1658852984203=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"ma": "no",
"datalen": "30000",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text.split("=(")[1].split(");")[0])
temp_df = pd.DataFrame(data_json).iloc[:, :6]
if temp_df.empty:
print(f"{symbol} 股票数据不存在,请检查是否已退市")
return
try:
stock_zh_a_daily(symbol=symbol, adjust="qfq")
except:
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[
[
True if "09:31:00" <= item <= "15:00:00" else False
for item in temp_df["time"]
]
]
need_df.drop_duplicates(subset=["date"], keep="last", inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
stock_zh_a_daily_qfq_df.index = pd.to_datetime(
stock_zh_a_daily_qfq_df["date"]
)
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df) :, :][
"close"
].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(
temp_df, result_df, left_index=True, right_index=True
)
merged_df["open"] = (
merged_df["open"].astype(float) * merged_df["close_y"]
)
merged_df["high"] = (
merged_df["high"].astype(float) * merged_df["close_y"]
)
merged_df["low"] = (
merged_df["low"].astype(float) * merged_df["close_y"]
)
merged_df["close"] = (
merged_df["close_x"].astype(float) * merged_df["close_y"]
)
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[
[
True if "09:31:00" <= item <= "15:00:00" else False
for item in temp_df["time"]
]
]
need_df.drop_duplicates(subset=["date"], keep="last", inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
stock_zh_a_daily_hfq_df.index = pd.to_datetime(
stock_zh_a_daily_hfq_df["date"]
)
result_df = stock_zh_a_daily_hfq_df.iloc[-len(need_df) :, :][
"close"
].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(
temp_df, result_df, left_index=True, right_index=True
)
merged_df["open"] = (
merged_df["open"].astype(float) * merged_df["close_y"]
)
merged_df["high"] = (
merged_df["high"].astype(float) * merged_df["close_y"]
)
merged_df["low"] = (
merged_df["low"].astype(float) * merged_df["close_y"]
)
merged_df["close"] = (
merged_df["close_x"].astype(float) * merged_df["close_y"]
)
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
| 18,829 |
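A sketch for the minute-bar interface. The function returns None for symbols it cannot resolve (it prints a delisting hint instead of raising), so the result should be checked before use.

from akshare.stock.stock_zh_a_sina import stock_zh_a_minute

minute_df = stock_zh_a_minute(symbol="sh600519", period="5", adjust="qfq")
if minute_df is not None:
    print(minute_df.tail())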
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hot_search_baidu.py
|
stock_hot_search_baidu
|
(symbol: str = "A股", date: str = "20221014", time: str = "0"):
|
return temp_df
|
百度股市通-热搜股票
https://gushitong.baidu.com/expressnews
:param symbol: choice of {"全部", "A股", "港股", "美股"}
:type symbol: str
:param date: 日期
:type date: str
:param time: 默认 time=0,则为当天的排行;如 time="16",则为 date 的 16 点的热门股票排行
:type time: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
|
百度股市通-热搜股票
https://gushitong.baidu.com/expressnews
:param symbol: choice of {"全部", "A股", "港股", "美股"}
:type symbol: str
:param date: 日期
:type date: str
:param time: 默认 time=0,则为当天的排行;如 time="16",则为 date 的 16 点的热门股票排行
:type time: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
| 12 | 52 |
def stock_hot_search_baidu(symbol: str = "A股", date: str = "20221014", time: str = "0"):
"""
百度股市通-热搜股票
https://gushitong.baidu.com/expressnews
:param symbol: choice of {"全部", "A股", "港股", "美股"}
:type symbol: str
:param date: 日期
:type date: str
:param time: 默认 time=0,则为当天的排行;如 time="16",则为 date 的 16 点的热门股票排行
:type time: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
"""
symbol_map = {
"全部": "all",
"A股": "ab",
"港股": "hk",
"美股": "us",
}
url = "https://finance.pae.baidu.com/vapi/v1/hotrank"
params = {
"tn": "wisexmlnew",
"dsp": "iphone",
"product": "stock",
"day": date,
"hour": time,
"pn": "0",
"rn": "1000",
"market": symbol_map[symbol],
"type": "day" if time == 0 else "hour",
"finClientType": "pc",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
data_json["Result"]["body"], columns=data_json["Result"]["header"]
)
temp_df["综合热度"] = pd.to_numeric(temp_df["综合热度"])
temp_df["排名变化"] = pd.to_numeric(temp_df["排名变化"])
temp_df["是否连续上榜"] = pd.to_numeric(temp_df["是否连续上榜"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hot_search_baidu.py#L12-L52
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 31.707317 |
[
13,
19,
20,
32,
33,
34,
37,
38,
39,
40
] | 24.390244 | false | 29.411765 | 41 | 1 | 75.609756 | 10 |
def stock_hot_search_baidu(symbol: str = "A股", date: str = "20221014", time: str = "0"):
symbol_map = {
"全部": "all",
"A股": "ab",
"港股": "hk",
"美股": "us",
}
url = "https://finance.pae.baidu.com/vapi/v1/hotrank"
params = {
"tn": "wisexmlnew",
"dsp": "iphone",
"product": "stock",
"day": date,
"hour": time,
"pn": "0",
"rn": "1000",
"market": symbol_map[symbol],
"type": "day" if time == 0 else "hour",
"finClientType": "pc",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
data_json["Result"]["body"], columns=data_json["Result"]["header"]
)
temp_df["综合热度"] = pd.to_numeric(temp_df["综合热度"])
temp_df["排名变化"] = pd.to_numeric(temp_df["排名变化"])
temp_df["是否连续上榜"] = pd.to_numeric(temp_df["是否连续上榜"])
return temp_df
| 18,830 |
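A usage sketch. One detail worth noting, read off the code above and not verified against the Baidu endpoint: with the documented string default time="0", the expression time == 0 inside the function is False, so the request appears to be sent with type="hour"; passing an integer 0 would select the daily ranking instead.

from akshare.stock.stock_hot_search_baidu import stock_hot_search_baidu

hot_df = stock_hot_search_baidu(symbol="A股", date="20221014", time="0")
print(hot_df.head())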
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_info_em.py
|
stock_individual_info_em
|
(symbol: str = "603777")
|
return temp_df
|
东方财富-个股-股票信息
https://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:return: 股票信息
:rtype: pandas.DataFrame
|
东方财富-个股-股票信息
https://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:return: 股票信息
:rtype: pandas.DataFrame
| 14 | 61 |
def stock_individual_info_em(symbol: str = "603777") -> pd.DataFrame:
"""
东方财富-个股-股票信息
https://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:return: 股票信息
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "http://push2.eastmoney.com/api/qt/stock/get"
params = {
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fltt": "2",
"invt": "2",
"fields": "f120,f121,f122,f174,f175,f59,f163,f43,f57,f58,f169,f170,f46,f44,f51,f168,f47,f164,f116,f60,f45,f52,f50,f48,f167,f117,f71,f161,f49,f530,f135,f136,f137,f138,f139,f141,f142,f144,f145,f147,f148,f140,f143,f146,f149,f55,f62,f162,f92,f173,f104,f105,f84,f85,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f107,f111,f86,f177,f78,f110,f262,f263,f264,f267,f268,f255,f256,f257,f258,f127,f199,f128,f198,f259,f260,f261,f171,f277,f278,f279,f288,f152,f250,f251,f252,f253,f254,f269,f270,f271,f272,f273,f274,f275,f276,f265,f266,f289,f290,f286,f285,f292,f293,f294,f295",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1640157544804",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.reset_index(inplace=True)
del temp_df["rc"]
del temp_df["rt"]
del temp_df["svr"]
del temp_df["lt"]
del temp_df["full"]
code_name_map = {
"f57": "股票代码",
"f58": "股票简称",
"f84": "总股本",
"f85": "流通股",
"f127": "行业",
"f116": "总市值",
"f117": "流通市值",
"f189": "上市时间",
}
temp_df["index"] = temp_df["index"].map(code_name_map)
temp_df = temp_df[pd.notna(temp_df["index"])]
if "dlmkts" in temp_df.columns:
del temp_df["dlmkts"]
temp_df.columns = [
"item",
"value",
]
temp_df.reset_index(inplace=True, drop=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_info_em.py#L14-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18.75 |
[
9,
10,
11,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
38,
39,
40,
41,
42,
46,
47
] | 41.666667 | false | 21.428571 | 48 | 2 | 58.333333 | 6 |
def stock_individual_info_em(symbol: str = "603777") -> pd.DataFrame:
code_id_dict = code_id_map_em()
url = "http://push2.eastmoney.com/api/qt/stock/get"
params = {
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fltt": "2",
"invt": "2",
"fields": "f120,f121,f122,f174,f175,f59,f163,f43,f57,f58,f169,f170,f46,f44,f51,f168,f47,f164,f116,f60,f45,f52,f50,f48,f167,f117,f71,f161,f49,f530,f135,f136,f137,f138,f139,f141,f142,f144,f145,f147,f148,f140,f143,f146,f149,f55,f62,f162,f92,f173,f104,f105,f84,f85,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f107,f111,f86,f177,f78,f110,f262,f263,f264,f267,f268,f255,f256,f257,f258,f127,f199,f128,f198,f259,f260,f261,f171,f277,f278,f279,f288,f152,f250,f251,f252,f253,f254,f269,f270,f271,f272,f273,f274,f275,f276,f265,f266,f289,f290,f286,f285,f292,f293,f294,f295",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1640157544804",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.reset_index(inplace=True)
del temp_df["rc"]
del temp_df["rt"]
del temp_df["svr"]
del temp_df["lt"]
del temp_df["full"]
code_name_map = {
"f57": "股票代码",
"f58": "股票简称",
"f84": "总股本",
"f85": "流通股",
"f127": "行业",
"f116": "总市值",
"f117": "流通市值",
"f189": "上市时间",
}
temp_df["index"] = temp_df["index"].map(code_name_map)
temp_df = temp_df[pd.notna(temp_df["index"])]
if "dlmkts" in temp_df.columns:
del temp_df["dlmkts"]
temp_df.columns = [
"item",
"value",
]
temp_df.reset_index(inplace=True, drop=True)
return temp_df
| 18,831 |
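A sketch turning the long item/value table into a plain dict for convenient access; the item labels come from the mapping in the function body.

from akshare.stock.stock_info_em import stock_individual_info_em

info_df = stock_individual_info_em(symbol="603777")
info = dict(zip(info_df["item"], info_df["value"]))
print(info.get("股票简称"), info.get("行业"), info.get("总市值"))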
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_profile_cninfo.py
|
stock_profile_cninfo
|
(symbol: str = "600030")
|
return temp_df
|
巨潮资讯-个股-公司概况
http://webapi.cninfo.com.cn/#/company
:param symbol: 股票代码
:type symbol: str
:return: 公司概况
:rtype: pandas.DataFrame
:raise: Exception,如果服务器返回的数据无法被解析
|
巨潮资讯-个股-公司概况
http://webapi.cninfo.com.cn/#/company
:param symbol: 股票代码
:type symbol: str
:return: 公司概况
:rtype: pandas.DataFrame
:raise: Exception,如果服务器返回的数据无法被解析
| 45 | 126 |
def stock_profile_cninfo(symbol: str = "600030") -> pd.DataFrame:
"""
巨潮资讯-个股-公司概况
http://webapi.cninfo.com.cn/#/company
:param symbol: 股票代码
:type symbol: str
:return: 公司概况
:rtype: pandas.DataFrame
:raise: Exception,如果服务器返回的数据无法被解析
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1133"
params = {
"scode": symbol,
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
columns = [
"公司名称",
"英文名称",
"曾用简称",
"A股代码",
"A股简称",
"B股代码",
"B股简称",
"H股代码",
"H股简称",
"入选指数",
"所属市场",
"所属行业",
"法人代表",
"注册资金",
"成立日期",
"上市日期",
"官方网站",
"电子邮箱",
"联系电话",
"传真",
"注册地址",
"办公地址",
"邮政编码",
"主营业务",
"经营范围",
"机构简介",
]
count = data_json["count"]
if count == 1:
# 有公司概况的
redundant_json = data_json["records"][0]
records_json = {}
i = 0
for k, v in redundant_json.items():
if i == (len(redundant_json) - 4):
break
records_json[k] = v
i += 1
del i
temp_df = pd.Series(records_json).to_frame().T
temp_df.columns = columns
elif count == 0:
# 没公司概况的
temp_df = pd.DataFrame(columns=columns)
else:
raise Exception("数据错误!")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_profile_cninfo.py#L45-L126
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 12.195122 |
[
10,
11,
14,
15,
16,
17,
18,
32,
33,
34,
62,
63,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
78,
80,
81
] | 32.926829 | false | 21.621622 | 82 | 5 | 67.073171 | 7 |
def stock_profile_cninfo(symbol: str = "600030") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1133"
params = {
"scode": symbol,
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
columns = [
"公司名称",
"英文名称",
"曾用简称",
"A股代码",
"A股简称",
"B股代码",
"B股简称",
"H股代码",
"H股简称",
"入选指数",
"所属市场",
"所属行业",
"法人代表",
"注册资金",
"成立日期",
"上市日期",
"官方网站",
"电子邮箱",
"联系电话",
"传真",
"注册地址",
"办公地址",
"邮政编码",
"主营业务",
"经营范围",
"机构简介",
]
count = data_json["count"]
if count == 1:
# 有公司概况的
redundant_json = data_json["records"][0]
records_json = {}
i = 0
for k, v in redundant_json.items():
if i == (len(redundant_json) - 4):
break
records_json[k] = v
i += 1
del i
temp_df = pd.Series(records_json).to_frame().T
temp_df.columns = columns
elif count == 0:
# 没公司概况的
temp_df = pd.DataFrame(columns=columns)
else:
raise Exception("数据错误!")
return temp_df
| 18,832 |
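A minimal usage sketch for the stock_profile_cninfo record above; the import path and the default symbol are taken from this row, while the variable name and the reachability of webapi.cninfo.com.cn are assumptions.

# Illustrative call of the function documented in this row.
# The import path mirrors the row's file path akshare/stock/stock_profile_cninfo.py.
from akshare.stock.stock_profile_cninfo import stock_profile_cninfo

profile_df = stock_profile_cninfo(symbol="600030")  # default symbol from the row
print(profile_df.T)  # one-row company profile, transposed for readability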
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hist_163.py
|
stock_zh_a_hist_163
|
(
symbol: str = "sh601318",
start_date: str = "10700101",
end_date: str = "20500101",
)
|
return temp_df
|
网易财经-行情首页-沪深 A 股-每日行情
注意:该接口只返回未复权数据
https://quotes.money.163.com/trade/lsjysj_601318.html?year=2022&season=2
:param symbol: 带市场表示的股票代码
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 每日行情
:rtype: pandas.DataFrame
|
网易财经-行情首页-沪深 A 股-每日行情
注意:该接口只返回未复权数据
https://quotes.money.163.com/trade/lsjysj_601318.html?year=2022&season=2
:param symbol: 带市场表示的股票代码
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 每日行情
:rtype: pandas.DataFrame
| 12 | 94 |
def stock_zh_a_hist_163(
symbol: str = "sh601318",
start_date: str = "10700101",
end_date: str = "20500101",
) -> pd.DataFrame:
"""
网易财经-行情首页-沪深 A 股-每日行情
注意:该接口只返回未复权数据
https://quotes.money.163.com/trade/lsjysj_601318.html?year=2022&season=2
:param symbol: 带市场表示的股票代码
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 每日行情
:rtype: pandas.DataFrame
"""
url = "http://quotes.money.163.com/service/chddata.html"
params = {
"code": "0601318",
"start": start_date,
"end": end_date,
"fields": "TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP",
}
if "sh" in symbol:
params.update({"code": f"0{symbol[2:]}"})
else:
params.update({"code": f"1{symbol[2:]}"})
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "quotes.money.163.com",
"Pragma": "no-cache",
"Referer": "http://quotes.money.163.com/trade/lsjysj_300254.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "gbk"
data_text = r.text
temp_df = pd.DataFrame(
[item.split(",") for item in data_text.split("\r\n")[1:]]
)
temp_df.columns = [
"日期",
"股票代码",
"名称",
"收盘价",
"最高价",
"最低价",
"开盘价",
"前收盘",
"涨跌额",
"涨跌幅",
"换手率",
"成交量",
"成交金额",
"总市值",
"流通市值",
]
temp_df["股票代码"] = temp_df["股票代码"].str.strip("'").str.strip()
temp_df["名称"] = temp_df["名称"].str.strip()
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["收盘价"] = pd.to_numeric(temp_df["收盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["前收盘"] = pd.to_numeric(temp_df["前收盘"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
temp_df.dropna(subset=["日期"], inplace=True)
temp_df.sort_values("日期", inplace=True)
temp_df.reset_index(inplace=True, drop=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hist_163.py#L12-L94
| 25 |
[
0
] | 1.204819 |
[
18,
19,
25,
26,
28,
29,
41,
42,
43,
44,
47,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82
] | 36.144578 | false | 12.820513 | 83 | 3 | 63.855422 | 11 |
def stock_zh_a_hist_163(
symbol: str = "sh601318",
start_date: str = "10700101",
end_date: str = "20500101",
) -> pd.DataFrame:
url = "http://quotes.money.163.com/service/chddata.html"
params = {
"code": "0601318",
"start": start_date,
"end": end_date,
"fields": "TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP",
}
if "sh" in symbol:
params.update({"code": f"0{symbol[2:]}"})
else:
params.update({"code": f"1{symbol[2:]}"})
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "quotes.money.163.com",
"Pragma": "no-cache",
"Referer": "http://quotes.money.163.com/trade/lsjysj_300254.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "gbk"
data_text = r.text
temp_df = pd.DataFrame(
[item.split(",") for item in data_text.split("\r\n")[1:]]
)
temp_df.columns = [
"日期",
"股票代码",
"名称",
"收盘价",
"最高价",
"最低价",
"开盘价",
"前收盘",
"涨跌额",
"涨跌幅",
"换手率",
"成交量",
"成交金额",
"总市值",
"流通市值",
]
temp_df["股票代码"] = temp_df["股票代码"].str.strip("'").str.strip()
temp_df["名称"] = temp_df["名称"].str.strip()
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df["收盘价"] = pd.to_numeric(temp_df["收盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["前收盘"] = pd.to_numeric(temp_df["前收盘"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
temp_df.dropna(subset=["日期"], inplace=True)
temp_df.sort_values("日期", inplace=True)
temp_df.reset_index(inplace=True, drop=True)
return temp_df
| 18,833 |
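A brief, hypothetical call of stock_zh_a_hist_163 with the defaults recorded above; it assumes the 163 quote endpoint is reachable, and the selected columns come from the column list in this row's code.

# Unadjusted daily quotes for one A-share symbol, per the docstring above.
from akshare.stock.stock_hist_163 import stock_zh_a_hist_163

hist_df = stock_zh_a_hist_163(
    symbol="sh601318", start_date="10700101", end_date="20500101"
)
print(hist_df[["日期", "收盘价", "成交量"]].tail())  # last few trading days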
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hold_num_cninfo.py
|
stock_hold_num_cninfo
|
(date: str = "20210630")
|
return temp_df
|
巨潮资讯-数据中心-专题统计-股东股本-股东人数及持股集中度
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: choice of {"XXXX0331", "XXXX0630", "XXXX0930", "XXXX1231"}; 从 20170331 开始
:type date: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-股东股本-股东人数及持股集中度
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: choice of {"XXXX0331", "XXXX0630", "XXXX0930", "XXXX1231"}; 从 20170331 开始
:type date: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
| 45 | 111 |
def stock_hold_num_cninfo(date: str = "20210630") -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-股东股本-股东人数及持股集中度
http://webapi.cninfo.com.cn/#/thematicStatistics
:param date: choice of {"XXXX0331", "XXXX0630", "XXXX0930", "XXXX1231"}; 从 20170331 开始
:type date: str
:return: 股东人数及持股集中度
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1034"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"rdate": date,
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"本期人均持股数量",
"股东人数增幅",
"上期股东人数",
"本期股东人数",
"证券简称",
"证券代码",
"人均持股数量增幅",
"变动日期",
"上期人均持股数量",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"变动日期",
"本期股东人数",
"上期股东人数",
"股东人数增幅",
"本期人均持股数量",
"上期人均持股数量",
"人均持股数量增幅",
]
]
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
temp_df["本期人均持股数量"] = pd.to_numeric(temp_df["本期人均持股数量"])
temp_df["股东人数增幅"] = pd.to_numeric(temp_df["股东人数增幅"])
temp_df["上期股东人数"] = pd.to_numeric(temp_df["上期股东人数"])
temp_df["本期股东人数"] = pd.to_numeric(temp_df["本期股东人数"])
temp_df["人均持股数量增幅"] = pd.to_numeric(temp_df["人均持股数量增幅"])
temp_df["上期人均持股数量"] = pd.to_numeric(temp_df["上期人均持股数量"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hold_num_cninfo.py#L45-L111
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 13.432836 |
[
9,
10,
11,
12,
13,
14,
29,
32,
33,
34,
35,
46,
59,
60,
61,
62,
63,
64,
65,
66
] | 29.850746 | false | 26.666667 | 67 | 1 | 70.149254 | 6 |
def stock_hold_num_cninfo(date: str = "20210630") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1034"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"rdate": date,
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"本期人均持股数量",
"股东人数增幅",
"上期股东人数",
"本期股东人数",
"证券简称",
"证券代码",
"人均持股数量增幅",
"变动日期",
"上期人均持股数量",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"变动日期",
"本期股东人数",
"上期股东人数",
"股东人数增幅",
"本期人均持股数量",
"上期人均持股数量",
"人均持股数量增幅",
]
]
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
temp_df["本期人均持股数量"] = pd.to_numeric(temp_df["本期人均持股数量"])
temp_df["股东人数增幅"] = pd.to_numeric(temp_df["股东人数增幅"])
temp_df["上期股东人数"] = pd.to_numeric(temp_df["上期股东人数"])
temp_df["本期股东人数"] = pd.to_numeric(temp_df["本期股东人数"])
temp_df["人均持股数量增幅"] = pd.to_numeric(temp_df["人均持股数量增幅"])
temp_df["上期人均持股数量"] = pd.to_numeric(temp_df["上期人均持股数量"])
return temp_df
| 18,834 |
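An illustrative call of stock_hold_num_cninfo; the quarter-end date follows the docstring above, and sorting by 股东人数增幅 is only one example of post-processing the returned frame.

from akshare.stock.stock_hold_num_cninfo import stock_hold_num_cninfo

holder_df = stock_hold_num_cninfo(date="20210630")  # quarter-end dates only
# Largest increases in shareholder count first (example post-processing).
print(holder_df.sort_values("股东人数增幅", ascending=False).head())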
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_rank_forecast.py
|
stock_rank_forecast_cninfo
|
(date: str = "20210910")
|
return temp_df
|
巨潮资讯-数据中心-评级预测-投资评级
http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7
:param date: 查询日期
:type date: str
:return: 投资评级
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-评级预测-投资评级
http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7
:param date: 查询日期
:type date: str
:return: 投资评级
:rtype: pandas.DataFrame
| 44 | 105 |
def stock_rank_forecast_cninfo(date: str = "20210910") -> pd.DataFrame:
"""
巨潮资讯-数据中心-评级预测-投资评级
http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7
:param date: 查询日期
:type date: str
:return: 投资评级
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]])}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"发布日期",
"前一次投资评级",
"评级变化",
"目标价格-上限",
"是否首次评级",
"投资评级",
"研究员名称",
"研究机构简称",
"目标价格-下限",
"证券代码",
]
temp_df = temp_df[[
"证券代码",
"证券简称",
"发布日期",
"研究机构简称",
"研究员名称",
"投资评级",
"是否首次评级",
"评级变化",
"前一次投资评级",
"目标价格-下限",
"目标价格-上限",
]]
temp_df["目标价格-上限"] = pd.to_numeric(temp_df["目标价格-上限"], errors="coerce")
temp_df["目标价格-下限"] = pd.to_numeric(temp_df["目标价格-下限"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_rank_forecast.py#L44-L105
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 14.516129 |
[
9,
10,
11,
12,
13,
14,
15,
30,
31,
32,
33,
46,
59,
60,
61
] | 24.193548 | false | 32 | 62 | 1 | 75.806452 | 6 |
def stock_rank_forecast_cninfo(date: str = "20210910") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]])}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"发布日期",
"前一次投资评级",
"评级变化",
"目标价格-上限",
"是否首次评级",
"投资评级",
"研究员名称",
"研究机构简称",
"目标价格-下限",
"证券代码",
]
temp_df = temp_df[[
"证券代码",
"证券简称",
"发布日期",
"研究机构简称",
"研究员名称",
"投资评级",
"是否首次评级",
"评级变化",
"前一次投资评级",
"目标价格-下限",
"目标价格-上限",
]]
temp_df["目标价格-上限"] = pd.to_numeric(temp_df["目标价格-上限"], errors="coerce")
temp_df["目标价格-下限"] = pd.to_numeric(temp_df["目标价格-下限"], errors="coerce")
return temp_df
| 18,835 |
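A short usage sketch for stock_rank_forecast_cninfo with the row's default date; the printed columns are taken from the column list in the code above, and the variable name is illustrative.

from akshare.stock.stock_rank_forecast import stock_rank_forecast_cninfo

rank_df = stock_rank_forecast_cninfo(date="20210910")
print(rank_df[["证券代码", "投资评级", "目标价格-下限", "目标价格-上限"]].head())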
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_board_concept_em.py
|
stock_board_concept_name_em
|
()
|
return temp_df
|
东方财富网-沪深板块-概念板块-名称
https://quote.eastmoney.com/center/boardlist.html#concept_board
:return: 概念板块-名称
:rtype: pandas.DataFrame
|
东方财富网-沪深板块-概念板块-名称
https://quote.eastmoney.com/center/boardlist.html#concept_board
:return: 概念板块-名称
:rtype: pandas.DataFrame
| 12 | 91 |
def stock_board_concept_name_em() -> pd.DataFrame:
"""
东方财富网-沪深板块-概念板块-名称
https://quote.eastmoney.com/center/boardlist.html#concept_board
:return: 概念板块-名称
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"], errors="coerce")
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"], errors="coerce")
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_concept_em.py#L12-L91
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.75 |
[
7,
8,
21,
22,
23,
24,
25,
26,
55,
71,
72,
73,
74,
75,
76,
77,
78,
79
] | 22.5 | false | 7.920792 | 80 | 1 | 77.5 | 4 |
def stock_board_concept_name_em() -> pd.DataFrame:
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"], errors="coerce")
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"], errors="coerce")
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"], errors="coerce")
return temp_df
| 18,836 |
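A minimal example of stock_board_concept_name_em, which takes no arguments; network access to the Eastmoney push2 endpoint is assumed.

from akshare.stock.stock_board_concept_em import stock_board_concept_name_em

board_df = stock_board_concept_name_em()
print(board_df[["板块名称", "板块代码", "涨跌幅"]].head())  # first few rows of the board list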
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_board_concept_em.py
|
stock_board_concept_hist_em
|
(
symbol: str = "数字货币",
period: str = "daily",
start_date: str = "20220101",
end_date: str = "20221128",
adjust: str = "",
)
|
return temp_df
|
东方财富网-沪深板块-概念板块-历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
:param period: 周期; choice of {"daily", "weekly", "monthly"}
:type period: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: pandas.DataFrame
|
东方财富网-沪深板块-概念板块-历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
:param period: 周期; choice of {"daily", "weekly", "monthly"}
:type period: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: pandas.DataFrame
| 94 | 182 |
def stock_board_concept_hist_em(
symbol: str = "数字货币",
period: str = "daily",
start_date: str = "20220101",
end_date: str = "20221128",
adjust: str = "",
) -> pd.DataFrame:
"""
东方财富网-沪深板块-概念板块-历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
    :param period: 周期; choice of {"daily", "weekly", "monthly"}
    :type period: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: pandas.DataFrame
"""
period_map = {
"daily": "101",
"weekly": "102",
"monthly": "103",
}
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_map[period],
"fqt": adjust_map[adjust],
"beg": start_date,
"end": end_date,
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_concept_em.py#L94-L182
| 25 |
[
0
] | 1.123596 |
[
23,
28,
29,
32,
33,
34,
47,
48,
49,
50,
63,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88
] | 24.719101 | false | 7.920792 | 89 | 2 | 75.280899 | 14 |
def stock_board_concept_hist_em(
symbol: str = "数字货币",
period: str = "daily",
start_date: str = "20220101",
end_date: str = "20221128",
adjust: str = "",
) -> pd.DataFrame:
period_map = {
"daily": "101",
"weekly": "102",
"monthly": "103",
}
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_map[period],
"fqt": adjust_map[adjust],
"beg": start_date,
"end": end_date,
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
| 18,837 |
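A hypothetical call of stock_board_concept_hist_em using the defaults recorded in this row; note that, per the code above, it first calls stock_board_concept_name_em to resolve the board code, so two requests are issued.

from akshare.stock.stock_board_concept_em import stock_board_concept_hist_em

concept_hist_df = stock_board_concept_hist_em(
    symbol="数字货币", period="daily",
    start_date="20220101", end_date="20221128", adjust=""
)
print(concept_hist_df.head())  # daily bars for the concept board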
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_board_concept_em.py
|
stock_board_concept_hist_min_em
|
(
symbol: str = "长寿药", period: str = "5"
)
|
return temp_df
|
东方财富网-沪深板块-概念板块-分时历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
:param period: choice of {"1", "5", "15", "30", "60"}
:type period: str
:return: 分时历史行情
:rtype: pandas.DataFrame
|
东方财富网-沪深板块-概念板块-分时历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
:param period: choice of {"1", "5", "15", "30", "60"}
:type period: str
:return: 分时历史行情
:rtype: pandas.DataFrame
| 185 | 255 |
def stock_board_concept_hist_min_em(
symbol: str = "长寿药", period: str = "5"
) -> pd.DataFrame:
"""
东方财富网-沪深板块-概念板块-分时历史行情
https://quote.eastmoney.com/bk/90.BK0715.html
:param symbol: 板块名称
:type symbol: str
:param period: choice of {"1", "5", "15", "30", "60"}
:type period: str
:return: 分时历史行情
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"end": "20500101",
"lmt": "1000000",
"_": "1647760607065",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_concept_em.py#L185-L255
| 25 |
[
0
] | 1.408451 |
[
13,
14,
17,
18,
29,
30,
31,
32,
45,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70
] | 28.169014 | false | 7.920792 | 71 | 2 | 71.830986 | 8 |
def stock_board_concept_hist_min_em(
symbol: str = "长寿药", period: str = "5"
) -> pd.DataFrame:
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"end": "20500101",
"lmt": "1000000",
"_": "1647760607065",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
| 18,838 |
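An illustrative minute-bar request via stock_board_concept_hist_min_em with the row's defaults; period accepts "1", "5", "15", "30" or "60" per the docstring, and the variable name is an assumption.

from akshare.stock.stock_board_concept_em import stock_board_concept_hist_min_em

concept_min_df = stock_board_concept_hist_min_em(symbol="长寿药", period="5")
print(concept_min_df.tail())  # most recent 5-minute bars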
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_board_concept_em.py
|
stock_board_concept_cons_em
|
(symbol: str = "车联网") -> pd
|
return temp_df
|
东方财富-沪深板块-概念板块-板块成份
https://quote.eastmoney.com/center/boardlist.html#boards-BK06551
:param symbol: 板块名称
:type symbol: str
:return: 板块成份
:rtype: pandas.DataFrame
|
东方财富-沪深板块-概念板块-板块成份
https://quote.eastmoney.com/center/boardlist.html#boards-BK06551
:param symbol: 板块名称
:type symbol: str
:return: 板块成份
:rtype: pandas.DataFrame
| 258 | 359 |
def stock_board_concept_cons_em(symbol: str = "车联网") -> pd.DataFrame:
"""
东方财富-沪深板块-概念板块-板块成份
https://quote.eastmoney.com/center/boardlist.html#boards-BK06551
:param symbol: 板块名称
:type symbol: str
:return: 板块成份
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://29.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:{stock_board_code} f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152,f45",
"_": "1626081702127",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"_",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_concept_em.py#L258-L359
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 8.823529 |
[
9,
10,
13,
14,
27,
28,
30,
31,
32,
33,
68,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101
] | 24.509804 | false | 7.920792 | 102 | 1 | 75.490196 | 6 |
def stock_board_concept_cons_em(symbol: str = "车联网") -> pd.DataFrame:
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://29.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:{stock_board_code} f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152,f45",
"_": "1626081702127",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"_",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
| 18,839 |
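A small usage sketch for stock_board_concept_cons_em; the board name is the row's default and the printed columns come from the column list in the code above.

from akshare.stock.stock_board_concept_em import stock_board_concept_cons_em

cons_df = stock_board_concept_cons_em(symbol="车联网")
print(cons_df[["代码", "名称", "最新价", "涨跌幅"]].head())  # constituents of the board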
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_sina.py
|
get_us_page_count
|
()
|
return page_count
|
新浪财经-美股-总页数
https://finance.sina.com.cn/stock/usstock/sector.shtml
:return: 美股总页数
:rtype: int
|
新浪财经-美股-总页数
https://finance.sina.com.cn/stock/usstock/sector.shtml
:return: 美股总页数
:rtype: int
| 25 | 49 |
def get_us_page_count() -> int:
"""
新浪财经-美股-总页数
https://finance.sina.com.cn/stock/usstock/sector.shtml
:return: 美股总页数
:rtype: int
"""
page = "1"
us_js_decode = f"US_CategoryService.getList?page={page}&num=20&sort=&asc=0&market=&id="
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
if not isinstance(int(data_json["count"]) / 20, int):
page_count = int(int(data_json["count"]) / 20) + 1
else:
page_count = int(int(data_json["count"]) / 20)
return page_count
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_sina.py#L25-L49
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 28 |
[
7,
8,
9,
10,
11,
12,
13,
17,
20,
21,
23,
24
] | 48 | false | 10.447761 | 25 | 2 | 52 | 4 |
def get_us_page_count() -> int:
page = "1"
us_js_decode = f"US_CategoryService.getList?page={page}&num=20&sort=&asc=0&market=&id="
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
if not isinstance(int(data_json["count"]) / 20, int):
page_count = int(int(data_json["count"]) / 20) + 1
else:
page_count = int(int(data_json["count"]) / 20)
return page_count
| 18,840 |
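A usage sketch for get_us_page_count, which returns the number of 20-item pages in Sina's US stock list. Note that int(count) / 20 is always a float in Python 3, so the isinstance branch in the row above always adds 1, overstating the result by one page when the total is an exact multiple of 20.

from akshare.stock.stock_us_sina import get_us_page_count

page_count = get_us_page_count()  # total pages, 20 symbols per page
print(page_count)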