nwo (string 10–28) | sha (string 40) | path (string 11–97) | identifier (string 1–64) | parameters (string 2–2.24k) | return_statement (string 0–2.17k) | docstring (string 0–5.45k) | docstring_summary (string 0–3.83k) | func_begin (int64 1–13.4k) | func_end (int64 2–13.4k) | function (string 28–56.4k) | url (string 106–209) | project (int64 1–48) | executed_lines (list) | executed_lines_pc (float64 0–153) | missing_lines (list) | missing_lines_pc (float64 0–100) | covered (bool, 2 classes) | filecoverage (float64 2.53–100) | function_lines (int64 2–1.46k) | mccabe (int64 1–253) | coverage (float64 0–100) | docstring_lines (int64 0–112) | function_nodoc (string 9–56.4k) | id (int64 0–29.8k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_first_info
|
()
|
return temp_df
|
乐咕乐股-申万一级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
|
乐咕乐股-申万一级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
| 351 | 404 |
def sw_index_first_info() -> pd.DataFrame:
"""
乐咕乐股-申万一级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L351-L404
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 12.962963 |
[
7,
8,
9,
10,
13,
16,
19,
20,
21,
22,
26,
30,
34,
38,
39,
48,
49,
50,
51,
52,
53
] | 38.888889 | false | 7.006369 | 54 | 8 | 61.111111 | 4 |
def sw_index_first_info() -> pd.DataFrame:
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level1Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
| 18,339 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_second_info
|
()
|
return temp_df
|
乐咕乐股-申万二级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
|
乐咕乐股-申万二级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
| 407 | 461 |
def sw_index_second_info() -> pd.DataFrame:
"""
乐咕乐股-申万二级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L407-L461
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 12.727273 |
[
7,
8,
9,
10,
13,
16,
19,
20,
21,
22,
26,
30,
34,
38,
39,
48,
49,
50,
51,
52,
54
] | 38.181818 | false | 7.006369 | 55 | 8 | 61.818182 | 4 |
def sw_index_second_info() -> pd.DataFrame:
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level2Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
| 18,340 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_third_info
|
()
|
return temp_df
|
乐咕乐股-申万三级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
|
乐咕乐股-申万三级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
| 464 | 517 |
def sw_index_third_info() -> pd.DataFrame:
"""
乐咕乐股-申万三级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L464-L517
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 12.962963 |
[
7,
8,
9,
10,
13,
16,
19,
20,
21,
22,
26,
30,
34,
38,
39,
48,
49,
50,
51,
52,
53
] | 38.888889 | false | 7.006369 | 54 | 8 | 61.111111 | 4 |
def sw_index_third_info() -> pd.DataFrame:
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
| 18,341 |
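The three `sw_index_*_info` functions in the rows above differ only in the `level1Items`/`level2Items`/`level3Items` container they scrape from legulegu.com. A minimal usage sketch, not part of the dataset rows, assuming this akshare revision is installed and the site is reachable:

```python
# Hedged usage sketch for the Shenwan classification scrapers shown above.
from akshare.index.index_sw import (
    sw_index_first_info,
    sw_index_second_info,
    sw_index_third_info,
)

level1_df = sw_index_first_info()   # level-1 industries: 行业代码, 行业名称, 成份个数, ...
level2_df = sw_index_second_info()  # level-2 industries, same column layout
level3_df = sw_index_third_info()   # level-3 industries, same column layout
print(level1_df.head())
```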
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
sw_index_third_cons
|
(symbol: str = "801120.SI")
|
return temp_df
|
乐咕乐股-申万三级-行业成份
https://legulegu.com/stockdata/index-composition?industryCode=801120.SI
:param symbol: 三级行业的行业代码
:type symbol: str
:return: 行业成份
:rtype: pandas.DataFrame
|
乐咕乐股-申万三级-行业成份
https://legulegu.com/stockdata/index-composition?industryCode=801120.SI
:param symbol: 三级行业的行业代码
:type symbol: str
:return: 行业成份
:rtype: pandas.DataFrame
| 520 | 568 |
def sw_index_third_cons(symbol: str = "801120.SI") -> pd.DataFrame:
"""
乐咕乐股-申万三级-行业成份
https://legulegu.com/stockdata/index-composition?industryCode=801120.SI
:param symbol: 三级行业的行业代码
:type symbol: str
:return: 行业成份
:rtype: pandas.DataFrame
"""
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"序号",
"股票代码",
"股票简称",
"纳入时间",
"申万1级",
"申万2级",
"申万3级",
"价格",
"市盈率",
"市盈率ttm",
"市净率",
"股息率",
"市值",
"归母净利润同比增长(09-30)",
"归母净利润同比增长(06-30)",
"营业收入同比增长(09-30)",
"营业收入同比增长(06-30)",
]
temp_df["价格"] = pd.to_numeric(temp_df["价格"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["市盈率ttm"] = pd.to_numeric(temp_df["市盈率ttm"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
temp_df["股息率"] = pd.to_numeric(
temp_df["股息率"].str.strip("%"), errors="coerce"
)
temp_df["市值"] = pd.to_numeric(temp_df["市值"], errors="coerce")
temp_df["归母净利润同比增长(09-30)"] = temp_df["归母净利润同比增长(09-30)"].str.strip("%")
temp_df["归母净利润同比增长(06-30)"] = temp_df["归母净利润同比增长(06-30)"].str.strip("%")
temp_df["营业收入同比增长(09-30)"] = temp_df["营业收入同比增长(09-30)"].str.strip("%")
temp_df["营业收入同比增长(06-30)"] = temp_df["营业收入同比增长(06-30)"].str.strip("%")
temp_df["归母净利润同比增长(09-30)"] = pd.to_numeric(temp_df["归母净利润同比增长(09-30)"], errors="coerce")
temp_df["归母净利润同比增长(06-30)"] = pd.to_numeric(temp_df["归母净利润同比增长(06-30)"], errors="coerce")
temp_df["营业收入同比增长(09-30)"] = pd.to_numeric(temp_df["营业收入同比增长(09-30)"], errors="coerce")
temp_df["营业收入同比增长(06-30)"] = pd.to_numeric(temp_df["营业收入同比增长(06-30)"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L520-L568
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18.367347 |
[
9,
10,
11,
30,
31,
32,
33,
34,
37,
39,
40,
41,
42,
44,
45,
46,
47,
48
] | 36.734694 | false | 7.006369 | 49 | 1 | 63.265306 | 6 |
def sw_index_third_cons(symbol: str = "801120.SI") -> pd.DataFrame:
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"序号",
"股票代码",
"股票简称",
"纳入时间",
"申万1级",
"申万2级",
"申万3级",
"价格",
"市盈率",
"市盈率ttm",
"市净率",
"股息率",
"市值",
"归母净利润同比增长(09-30)",
"归母净利润同比增长(06-30)",
"营业收入同比增长(09-30)",
"营业收入同比增长(06-30)",
]
temp_df["价格"] = pd.to_numeric(temp_df["价格"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["市盈率ttm"] = pd.to_numeric(temp_df["市盈率ttm"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
temp_df["股息率"] = pd.to_numeric(
temp_df["股息率"].str.strip("%"), errors="coerce"
)
temp_df["市值"] = pd.to_numeric(temp_df["市值"], errors="coerce")
temp_df["归母净利润同比增长(09-30)"] = temp_df["归母净利润同比增长(09-30)"].str.strip("%")
temp_df["归母净利润同比增长(06-30)"] = temp_df["归母净利润同比增长(06-30)"].str.strip("%")
temp_df["营业收入同比增长(09-30)"] = temp_df["营业收入同比增长(09-30)"].str.strip("%")
temp_df["营业收入同比增长(06-30)"] = temp_df["营业收入同比增长(06-30)"].str.strip("%")
temp_df["归母净利润同比增长(09-30)"] = pd.to_numeric(temp_df["归母净利润同比增长(09-30)"], errors="coerce")
temp_df["归母净利润同比增长(06-30)"] = pd.to_numeric(temp_df["归母净利润同比增长(06-30)"], errors="coerce")
temp_df["营业收入同比增长(09-30)"] = pd.to_numeric(temp_df["营业收入同比增长(09-30)"], errors="coerce")
temp_df["营业收入同比增长(06-30)"] = pd.to_numeric(temp_df["营业收入同比增长(06-30)"], errors="coerce")
return temp_df
| 18,342 |
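`sw_index_third_cons` takes a level-3 industry code and reads the constituent table via `pandas.read_html`, stripping `%` signs before numeric coercion. A hedged sketch, assuming the default code `801120.SI` is still listed on the site:

```python
from akshare.index.index_sw import sw_index_third_cons

# Constituents of one Shenwan level-3 industry; percentage columns are
# converted to plain numbers inside the function.
cons_df = sw_index_third_cons(symbol="801120.SI")
print(cons_df[["股票代码", "股票简称", "市盈率ttm", "股息率"]].head())
```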
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
index_level_one_hist_sw
|
(symbol: str = "801010")
|
return temp_df
|
申万指数-指数发布-指数体系-一级行业
http://www.swsindex.com/idx0110.aspx
:param symbol: 一级行业
:type symbol: str
:return: 一级行业
:rtype: pandas.DataFrame
|
申万指数-指数发布-指数体系-一级行业
http://www.swsindex.com/idx0110.aspx
:param symbol: 一级行业
:type symbol: str
:return: 一级行业
:rtype: pandas.DataFrame
| 571 | 624 |
def index_level_one_hist_sw(symbol: str = "801010") -> pd.DataFrame:
"""
申万指数-指数发布-指数体系-一级行业
http://www.swsindex.com/idx0110.aspx
:param symbol: 一级行业
:type symbol: str
:return: 一级行业
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L571-L624
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.666667 |
[
9,
10,
15,
27,
28,
29,
30,
50,
51,
52,
53
] | 20.37037 | false | 7.006369 | 54 | 1 | 79.62963 | 6 |
def index_level_one_hist_sw(symbol: str = "801010") -> pd.DataFrame:
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
| 18,343 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
index_market_representation_hist_sw
|
(
symbol: str = "801001",
)
|
return temp_df
|
申万指数-指数发布-指数体系-市场表征
http://www.swsindex.com/idx0110.aspx
:param symbol: 市场表征代码
:type symbol: str
:return: 市场表征
:rtype: pandas.DataFrame
|
申万指数-指数发布-指数体系-市场表征
http://www.swsindex.com/idx0110.aspx
:param symbol: 市场表征代码
:type symbol: str
:return: 市场表征
:rtype: pandas.DataFrame
| 627 | 682 |
def index_market_representation_hist_sw(
symbol: str = "801001",
) -> pd.DataFrame:
"""
申万指数-指数发布-指数体系-市场表征
http://www.swsindex.com/idx0110.aspx
:param symbol: 市场表征代码
:type symbol: str
:return: 市场表征
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L627-L682
| 25 |
[
0
] | 1.785714 |
[
11,
12,
17,
29,
30,
31,
32,
52,
53,
54,
55
] | 19.642857 | false | 7.006369 | 56 | 1 | 80.357143 | 6 |
def index_market_representation_hist_sw(
symbol: str = "801001",
) -> pd.DataFrame:
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
| 18,344 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_sw.py
|
index_style_index_hist_sw
|
(symbol: str = "801811")
|
return temp_df
|
申万指数-指数发布-指数体系-风格指数
http://www.swsindex.com/idx0110.aspx
:param symbol: 风格指数代码
:type symbol: str
:return: 风格指数
:rtype: pandas.DataFrame
|
申万指数-指数发布-指数体系-风格指数
http://www.swsindex.com/idx0110.aspx
:param symbol: 风格指数代码
:type symbol: str
:return: 风格指数
:rtype: pandas.DataFrame
| 685 | 738 |
def index_style_index_hist_sw(symbol: str = "801811") -> pd.DataFrame:
"""
申万指数-指数发布-指数体系-风格指数
http://www.swsindex.com/idx0110.aspx
:param symbol: 风格指数代码
:type symbol: str
:return: 风格指数
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_sw.py#L685-L738
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.666667 |
[
9,
10,
15,
27,
28,
29,
30,
50,
51,
52,
53
] | 20.37037 | false | 7.006369 | 54 | 1 | 79.62963 | 6 |
def index_style_index_hist_sw(symbol: str = "801811") -> pd.DataFrame:
url = "http://www.swsindex.com/downloadfiles.aspx"
params = {
"swindexcode": symbol,
"type": "510",
"columnid": "8890",
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en,zh;q=0.9",
"Cache-Control": "no-cache",
"Host": "www.swsindex.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://www.swsindex.com/idx0110.aspx",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0]
temp_df.columns = [
"指数代码",
"指数名称",
"发布日期",
"开盘指数",
"最高指数",
"最低指数",
"收盘指数",
"成交量",
"成交额",
"涨跌幅",
"换手率",
"市盈率",
"市净率",
"均价",
"成交额占比",
"流通市值",
"平均流通市值",
"股息率",
]
temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
temp_df.sort_values(["发布日期"], inplace=True, ignore_index=True)
temp_df["指数代码"] = temp_df["指数代码"].astype(str)
return temp_df
| 18,345 |
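The three swsindex.com history functions above (`index_level_one_hist_sw`, `index_market_representation_hist_sw`, `index_style_index_hist_sw`) share one download endpoint and column layout, differing only in the default `symbol`. A usage sketch under the same installed-package and reachable-site assumptions:

```python
from akshare.index.index_sw import (
    index_level_one_hist_sw,
    index_market_representation_hist_sw,
    index_style_index_hist_sw,
)

# Each call downloads the full daily history for one Shenwan index code.
industry_df = index_level_one_hist_sw(symbol="801010")            # level-1 industry index
market_df = index_market_representation_hist_sw(symbol="801001")  # market-representation index
style_df = index_style_index_hist_sw(symbol="801811")             # style index
print(industry_df[["发布日期", "收盘指数", "涨跌幅"]].tail())
```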
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_google.py
|
google_index
|
(
symbol: str = "python",
start_date: str = "20191201",
end_date: str = "20191204",
)
|
return search_df
|
谷歌指数
:param symbol: 关键词
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 谷歌指数
:rtype: pandas
|
谷歌指数
:param symbol: 关键词
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 谷歌指数
:rtype: pandas
| 11 | 41 |
def google_index(
symbol: str = "python",
start_date: str = "20191201",
end_date: str = "20191204",
):
"""
谷歌指数
:param symbol: 关键词
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:return: 谷歌指数
:rtype: pandas
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
pytrends = TrendReq(hl="en-US", tz=360)
kw_list = [symbol]
pytrends.build_payload(
kw_list, cat=0, timeframe=start_date + " " + end_date, geo="", gprop=""
)
search_df = pytrends.interest_over_time()
search_se = search_df[symbol]
search_df = pd.DataFrame(search_se)
search_df.reset_index(inplace=True)
if "T" not in start_date:
search_df['date'] = pd.to_datetime(search_df['date']).dt.date
return search_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_google.py#L11-L41
| 25 |
[
0
] | 3.225806 |
[
17,
18,
19,
20,
21,
24,
25,
26,
27,
28,
29,
30
] | 38.709677 | false | 21.73913 | 31 | 2 | 61.290323 | 9 |
def google_index(
symbol: str = "python",
start_date: str = "20191201",
end_date: str = "20191204",
):
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
pytrends = TrendReq(hl="en-US", tz=360)
kw_list = [symbol]
pytrends.build_payload(
kw_list, cat=0, timeframe=start_date + " " + end_date, geo="", gprop=""
)
search_df = pytrends.interest_over_time()
search_se = search_df[symbol]
search_df = pd.DataFrame(search_se)
search_df.reset_index(inplace=True)
if "T" not in start_date:
search_df['date'] = pd.to_datetime(search_df['date']).dt.date
return search_df
| 18,346 |
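`google_index` wraps pytrends' `TrendReq` / `build_payload` / `interest_over_time` flow. A sketch, assuming the `pytrends` dependency is installed and Google Trends is reachable (values are relative search interest):

```python
from akshare.index.index_google import google_index

# Daily relative search interest for one keyword over a short window.
trend_df = google_index(symbol="python", start_date="20191201", end_date="20191204")
print(trend_df)  # columns: date plus one column named after `symbol`
```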
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_zh_em.py
|
index_code_id_map_em
|
()
|
return code_id_dict
|
东方财富-股票和市场代码
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: 股票和市场代码
:rtype: dict
|
东方财富-股票和市场代码
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: 股票和市场代码
:rtype: dict
| 11 | 84 |
def index_code_id_map_em() -> dict:
"""
东方财富-股票和市场代码
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: 股票和市场代码
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
code_id_dict = {
key: value - 1 if value == 1 else value + 1
for key, value in code_id_dict.items()
}
return code_id_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_zh_em.py#L11-L84
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 9.459459 |
[
7,
8,
21,
22,
23,
24,
25,
26,
27,
28,
29,
42,
43,
44,
45,
46,
47,
48,
49,
62,
63,
64,
65,
66,
67,
68,
69,
73
] | 37.837838 | false | 5.384615 | 74 | 4 | 62.162162 | 4 |
def index_code_id_map_em() -> dict:
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
code_id_dict = {
key: value - 1 if value == 1 else value + 1
for key, value in code_id_dict.items()
}
return code_id_dict
| 18,347 |
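`index_code_id_map_em` issues three Eastmoney list queries (SH, SZ, BJ boards) and returns a `{code: market_id}` dict that the functions below use to build the `secid` parameter as `"<id>.<code>"`. A hedged sketch:

```python
from akshare.index.index_zh_em import index_code_id_map_em

# Map of security codes to the Eastmoney market id used in secid strings.
code_id_map = index_code_id_map_em()
print(len(code_id_map), code_id_map.get("000001"))
```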
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_zh_em.py
|
index_zh_a_hist
|
(
symbol: str = "000859",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
)
|
return temp_df
|
东方财富网-中国股票指数-行情数据
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 行情数据
:rtype: pandas.DataFrame
|
东方财富网-中国股票指数-行情数据
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 行情数据
:rtype: pandas.DataFrame
| 87 | 212 |
def index_zh_a_hist(
symbol: str = "000859",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
) -> pd.DataFrame:
"""
东方财富网-中国股票指数-行情数据
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 行情数据
:rtype: pandas.DataFrame
"""
code_id_dict = index_code_id_map_em()
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
try:
params = {
"secid": f"{code_id_dict[symbol]}.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
except KeyError:
params = {
"secid": f"1.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"secid": f"0.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"secid": f"2.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
try:
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
except:
# 兼容 000859(中证国企一路一带) 和 000861(中证央企创新)
params = {
"secid": f"2.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_zh_em.py#L87-L212
| 25 |
[
0
] | 0.793651 |
[
20,
21,
22,
23,
24,
35,
36,
47,
48,
49,
50,
61,
62,
63,
64,
75,
76,
77,
78,
81,
83,
94,
95,
96,
99,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125
] | 30.952381 | false | 5.384615 | 126 | 7 | 69.047619 | 12 |
def index_zh_a_hist(
symbol: str = "000859",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
) -> pd.DataFrame:
code_id_dict = index_code_id_map_em()
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
try:
params = {
"secid": f"{code_id_dict[symbol]}.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
except KeyError:
params = {
"secid": f"1.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"secid": f"0.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"secid": f"2.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
try:
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
except:
# 兼容 000859(中证国企一路一带) 和 000861(中证央企创新)
params = {
"secid": f"2.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": "0",
"beg": "0",
"end": "20500000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
| 18,348 |
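`index_zh_a_hist` looks the symbol up in that map and, if the kline endpoint returns no data, retries with the `1.`, `0.` and `2.` market prefixes before slicing the result by date. A usage sketch:

```python
from akshare.index.index_zh_em import index_zh_a_hist

# Daily bars for one index code, sliced to a date range.
hist_df = index_zh_a_hist(
    symbol="000859", period="daily", start_date="20200101", end_date="20201231"
)
print(hist_df[["日期", "开盘", "收盘", "成交量"]].head())
```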
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_zh_em.py
|
index_zh_a_hist_min_em
|
(
symbol: str = "399006",
period: str = "1",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
)
|
东方财富网-指数数据-每日分时行情
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日分时行情
:rtype: pandas.DataFrame
|
东方财富网-指数数据-每日分时行情
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日分时行情
:rtype: pandas.DataFrame
| 215 | 370 |
def index_zh_a_hist_min_em(
symbol: str = "399006",
period: str = "1",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
东方财富网-指数数据-每日分时行情
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 指数代码
:type symbol: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 每日分时行情
:rtype: pandas.DataFrame
"""
code_id_dict = index_code_id_map_em()
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
try:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
except KeyError:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"1.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"0.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
try:
params = {
"secid": f"{code_id_dict[symbol]}.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
except:
params = {
"secid": f"0.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_zh_em.py#L215-L370
| 25 |
[
0
] | 0.641026 |
[
20,
21,
22,
23,
24,
33,
34,
43,
44,
45,
46,
55,
56,
57,
60,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
83,
84,
85,
96,
97,
108,
109,
110,
113,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
155
] | 33.333333 | false | 5.384615 | 156 | 7 | 66.666667 | 12 |
def index_zh_a_hist_min_em(
symbol: str = "399006",
period: str = "1",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
code_id_dict = index_code_id_map_em()
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
try:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
except KeyError:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"1.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if data_json["data"] is None:
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"0.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
try:
params = {
"secid": f"{code_id_dict[symbol]}.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
except:
params = {
"secid": f"0.{symbol}",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
| 18,349 |
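`index_zh_a_hist_min_em` switches endpoints on `period`: `"1"` requests the trends2 endpoint with `ndays=5`, while `"5"` through `"60"` hit the kline endpoint. A sketch:

```python
from akshare.index.index_zh_em import index_zh_a_hist_min_em

# 5-minute bars come from the kline endpoint; period="1" would instead
# request the 5-day intraday trends feed.
min_df = index_zh_a_hist_min_em(symbol="399006", period="5")
print(min_df[["时间", "开盘", "收盘", "成交量"]].tail())
```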
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cni.py
|
index_all_cni
|
()
|
return temp_df
|
国证指数-最近交易日的所有指数
http://www.cnindex.com.cn/zh_indices/sese/index.html?act_menu=1&index_type=-1
:return: 国证指数-所有指数
:rtype: pandas.DataFrame
|
国证指数-最近交易日的所有指数
http://www.cnindex.com.cn/zh_indices/sese/index.html?act_menu=1&index_type=-1
:return: 国证指数-所有指数
:rtype: pandas.DataFrame
| 14 | 74 |
def index_all_cni() -> pd.DataFrame:
"""
国证指数-最近交易日的所有指数
http://www.cnindex.com.cn/zh_indices/sese/index.html?act_menu=1&index_type=-1
:return: 国证指数-所有指数
:rtype: pandas.DataFrame
"""
url = "http://www.cnindex.com.cn/index/indexList"
params = {
"channelCode": "-1",
"rows": "2000",
"pageNum": "1",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["rows"])
temp_df.columns = [
"_",
"_",
"指数代码",
"_",
"_",
"_",
"_",
"_",
"指数简称",
"_",
"_",
"_",
"样本数",
"收盘点位",
"涨跌幅",
"_",
"PE滚动",
"_",
"成交量",
"成交额",
"总市值",
"自由流通市值",
"_",
"_",
]
temp_df = temp_df[
[
"指数代码",
"指数简称",
"样本数",
"收盘点位",
"涨跌幅",
"PE滚动",
"成交量",
"成交额",
"总市值",
"自由流通市值",
]
]
temp_df['成交量'] = temp_df['成交量'] / 100000
temp_df['成交额'] = temp_df['成交额'] / 100000000
temp_df['总市值'] = temp_df['总市值'] / 100000000
temp_df['自由流通市值'] = temp_df['自由流通市值'] / 100000000
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cni.py#L14-L74
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 11.47541 |
[
7,
8,
13,
14,
15,
16,
42,
56,
57,
58,
59,
60
] | 19.672131 | false | 12.5 | 61 | 1 | 80.327869 | 4 |
def index_all_cni() -> pd.DataFrame:
url = "http://www.cnindex.com.cn/index/indexList"
params = {
"channelCode": "-1",
"rows": "2000",
"pageNum": "1",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["rows"])
temp_df.columns = [
"_",
"_",
"指数代码",
"_",
"_",
"_",
"_",
"_",
"指数简称",
"_",
"_",
"_",
"样本数",
"收盘点位",
"涨跌幅",
"_",
"PE滚动",
"_",
"成交量",
"成交额",
"总市值",
"自由流通市值",
"_",
"_",
]
temp_df = temp_df[
[
"指数代码",
"指数简称",
"样本数",
"收盘点位",
"涨跌幅",
"PE滚动",
"成交量",
"成交额",
"总市值",
"自由流通市值",
]
]
temp_df['成交量'] = temp_df['成交量'] / 100000
temp_df['成交额'] = temp_df['成交额'] / 100000000
temp_df['总市值'] = temp_df['总市值'] / 100000000
temp_df['自由流通市值'] = temp_df['自由流通市值'] / 100000000
return temp_df
| 18,350 |
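`index_all_cni` returns a latest-trading-day snapshot of all CNI (国证) indices, rescaling the volume, turnover and market-cap columns by fixed factors. A hedged usage sketch:

```python
from akshare.index.index_cni import index_all_cni

# Snapshot of every CNI index on the most recent trading day.
all_cni_df = index_all_cni()
print(all_cni_df[["指数代码", "指数简称", "收盘点位", "涨跌幅"]].head())
```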
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cni.py
|
index_hist_cni
|
(symbol: str = "399001")
|
return temp_df
|
指数历史行情数据
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:return: 指数历史行情数据
:rtype: pandas.DataFrame
|
指数历史行情数据
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:return: 指数历史行情数据
:rtype: pandas.DataFrame
| 77 | 124 |
def index_hist_cni(symbol: str = "399001") -> pd.DataFrame:
"""
指数历史行情数据
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:return: 指数历史行情数据
:rtype: pandas.DataFrame
"""
url = "http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat"
params = {
"indexCode": symbol,
"startDate": "",
"endDate": "",
"frequency": "day",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["data"])
temp_df.columns = [
"日期",
"_",
"最高价",
"开盘价",
"最低价",
"收盘价",
"_",
"涨跌幅",
"成交额",
"成交量",
"_",
]
temp_df = temp_df[
[
"日期",
"开盘价",
"最高价",
"最低价",
"收盘价",
"涨跌幅",
"成交量",
"成交额",
]
]
temp_df["涨跌幅"] = temp_df["涨跌幅"].str.replace("%", "")
temp_df["涨跌幅"] = temp_df["涨跌幅"].astype("float")
temp_df["涨跌幅"] = temp_df["涨跌幅"] / 100
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cni.py#L77-L124
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18.75 |
[
9,
10,
16,
17,
18,
19,
32,
44,
45,
46,
47
] | 22.916667 | false | 12.5 | 48 | 1 | 77.083333 | 6 |
def index_hist_cni(symbol: str = "399001") -> pd.DataFrame:
url = "http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat"
params = {
"indexCode": symbol,
"startDate": "",
"endDate": "",
"frequency": "day",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["data"])
temp_df.columns = [
"日期",
"_",
"最高价",
"开盘价",
"最低价",
"收盘价",
"_",
"涨跌幅",
"成交额",
"成交量",
"_",
]
temp_df = temp_df[
[
"日期",
"开盘价",
"最高价",
"最低价",
"收盘价",
"涨跌幅",
"成交量",
"成交额",
]
]
temp_df["涨跌幅"] = temp_df["涨跌幅"].str.replace("%", "")
temp_df["涨跌幅"] = temp_df["涨跌幅"].astype("float")
temp_df["涨跌幅"] = temp_df["涨跌幅"] / 100
return temp_df
| 18,351 |
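`index_hist_cni` pulls the daily history for one CNI index and converts the percentage-string `涨跌幅` column into a float fraction. Sketch:

```python
from akshare.index.index_cni import index_hist_cni

# Daily history for index 399001; 涨跌幅 is returned as a fraction, not a percent string.
hist_cni_df = index_hist_cni(symbol="399001")
print(hist_cni_df.head())
```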
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cni.py
|
index_detail_cni
|
(symbol: str = '399005', date: str = '202011')
|
return temp_df
|
国证指数-样本详情-指定日期的样本成份
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:param date: 指定月份
:type date: str
:return: 指定日期的样本成份
:rtype: pandas.DataFrame
|
国证指数-样本详情-指定日期的样本成份
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:param date: 指定月份
:type date: str
:return: 指定日期的样本成份
:rtype: pandas.DataFrame
| 127 | 158 |
def index_detail_cni(symbol: str = '399005', date: str = '202011') -> pd.DataFrame:
"""
国证指数-样本详情-指定日期的样本成份
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param symbol: 指数代码
:type symbol: str
:param date: 指定月份
:type date: str
:return: 指定日期的样本成份
:rtype: pandas.DataFrame
"""
url = 'http://www.cnindex.com.cn/sample-detail/download'
params = {
'indexcode': symbol,
'dateStr': '-'.join([date[:4], date[4:]])
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
temp_df['自由流通市值'] = pd.to_numeric(temp_df['自由流通市值'])
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'])
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cni.py#L127-L158
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 34.375 |
[
11,
12,
16,
17,
18,
19,
28,
29,
30,
31
] | 31.25 | false | 12.5 | 32 | 1 | 68.75 | 8 |
def index_detail_cni(symbol: str = '399005', date: str = '202011') -> pd.DataFrame:
url = 'http://www.cnindex.com.cn/sample-detail/download'
params = {
'indexcode': symbol,
'dateStr': '-'.join([date[:4], date[4:]])
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
temp_df['自由流通市值'] = pd.to_numeric(temp_df['自由流通市值'])
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'])
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
| 18,352 |
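`index_detail_cni` downloads the constituent list for one index and month as an Excel file and parses it with `pandas.read_excel`. A sketch, assuming the requested month is available upstream:

```python
from akshare.index.index_cni import index_detail_cni

# Constituents of index 399005 as of 2020-11; 样本代码 is zero-padded to 6 digits.
detail_df = index_detail_cni(symbol="399005", date="202011")
print(detail_df[["样本代码", "样本简称", "权重"]].head())
```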
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cni.py
|
index_detail_hist_cni
|
(symbol: str = '399001', date: str = "")
|
return temp_df
|
国证指数-样本详情-历史样本
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param date: 指数代码
:type date: str
:param symbol: 指数代码
:type symbol: str
:return: 历史样本
:rtype: pandas.DataFrame
|
国证指数-样本详情-历史样本
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param date: 指数代码
:type date: str
:param symbol: 指数代码
:type symbol: str
:return: 历史样本
:rtype: pandas.DataFrame
| 161 | 225 |
def index_detail_hist_cni(symbol: str = '399001', date: str = "") -> pd.DataFrame:
"""
国证指数-样本详情-历史样本
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param date: 指数代码
:type date: str
:param symbol: 指数代码
:type symbol: str
:return: 历史样本
:rtype: pandas.DataFrame
"""
if date:
url = 'http://www.cnindex.com.cn/sample-detail/detail'
params = {
'indexcode': symbol,
'dateStr': '-'.join([date[:4], date[4:]]),
'pageNum': '1',
'rows': '50000',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['rows'])
temp_df.columns = [
'-',
'-',
'日期',
'样本代码',
'样本简称',
'所属行业',
'-',
'自由流通市值',
'总市值',
'权重',
'-',
]
temp_df = temp_df[[
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]]
else:
url = 'http://www.cnindex.com.cn/sample-detail/download-history'
params = {
'indexcode': symbol
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
temp_df['自由流通市值'] = pd.to_numeric(temp_df['自由流通市值'])
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'])
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cni.py#L161-L225
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 16.923077 |
[
11,
12,
13,
19,
20,
21,
22,
35,
45,
46,
49,
50,
51,
52,
61,
62,
63,
64
] | 27.692308 | false | 12.5 | 65 | 2 | 72.307692 | 8 |
def index_detail_hist_cni(symbol: str = '399001', date: str = "") -> pd.DataFrame:
if date:
url = 'http://www.cnindex.com.cn/sample-detail/detail'
params = {
'indexcode': symbol,
'dateStr': '-'.join([date[:4], date[4:]]),
'pageNum': '1',
'rows': '50000',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['rows'])
temp_df.columns = [
'-',
'-',
'日期',
'样本代码',
'样本简称',
'所属行业',
'-',
'自由流通市值',
'总市值',
'权重',
'-',
]
temp_df = temp_df[[
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]]
else:
url = 'http://www.cnindex.com.cn/sample-detail/download-history'
params = {
'indexcode': symbol
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
temp_df['自由流通市值'] = pd.to_numeric(temp_df['自由流通市值'])
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'])
temp_df['权重'] = pd.to_numeric(temp_df['权重'])
return temp_df
| 18,353 |
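A hedged sketch of the two call paths in index_detail_hist_cni above: when a month is given the JSON detail endpoint is queried for that month only, and when date is empty the full history is downloaded as an Excel file. Same top-level-export assumption as before:

import akshare as ak

# one month of constituents via the JSON endpoint
one_month_df = ak.index_detail_hist_cni(symbol="399001", date="202011")

# the complete history via the Excel download
full_hist_df = ak.index_detail_hist_cni(symbol="399001")
print(full_hist_df["日期"].min(), full_hist_df["日期"].max())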
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/index/index_cni.py
|
index_detail_hist_adjust_cni
|
(symbol: str = '399005')
|
return temp_df
|
国证指数-样本详情-历史调样
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399005
:param symbol: 指数代码
:type symbol: str
:return: 历史调样
:rtype: pandas.DataFrame
|
国证指数-样本详情-历史调样
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399005
:param symbol: 指数代码
:type symbol: str
:return: 历史调样
:rtype: pandas.DataFrame
| 228 | 247 |
def index_detail_hist_adjust_cni(symbol: str = '399005') -> pd.DataFrame:
"""
国证指数-样本详情-历史调样
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399005
:param symbol: 指数代码
:type symbol: str
:return: 历史调样
:rtype: pandas.DataFrame
"""
url = 'http://www.cnindex.com.cn/sample-detail/download-adjustment'
params = {
'indexcode': symbol
}
r = requests.get(url, params=params)
try:
temp_df = pd.read_excel(r.content, engine="openpyxl")
except zipfile.BadZipFile as e:
return pd.DataFrame()
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/index/index_cni.py#L228-L247
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 45 |
[
9,
10,
13,
14,
15,
16,
17,
18,
19
] | 45 | false | 12.5 | 20 | 2 | 55 | 6 |
def index_detail_hist_adjust_cni(symbol: str = '399005') -> pd.DataFrame:
url = 'http://www.cnindex.com.cn/sample-detail/download-adjustment'
params = {
'indexcode': symbol
}
r = requests.get(url, params=params)
try:
temp_df = pd.read_excel(r.content, engine="openpyxl")
except zipfile.BadZipFile as e:
return pd.DataFrame()
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
return temp_df
| 18,354 |
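index_detail_hist_adjust_cni above swallows zipfile.BadZipFile and returns an empty DataFrame when the server response is not a valid xlsx, so callers should check for emptiness; a short sketch under the same export assumption:

import akshare as ak

adjust_df = ak.index_detail_hist_adjust_cni(symbol="399005")
if adjust_df.empty:
    # the download was not a valid Excel file (BadZipFile is caught inside the function)
    print("no adjustment data returned")
else:
    print(adjust_df.head())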
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_get_pyver
|
()
| 27 | 34 |
def _get_pyver():
global _py_major, _py_minor
import sys
vi = sys.version_info
try:
_py_major, _py_minor = vi.major, vi.minor
except AttributeError:
_py_major, _py_minor = vi[0], vi[1]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L27-L34
| 25 |
[
0,
2,
3,
4,
5
] | 62.5 |
[
6,
7
] | 25 | false | 14.825334 | 8 | 2 | 75 | 0 |
def _get_pyver():
global _py_major, _py_minor
import sys
vi = sys.version_info
try:
_py_major, _py_minor = vi.major, vi.minor
except AttributeError:
_py_major, _py_minor = vi[0], vi[1]
| 18,355 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
determine_float_limits
|
(number_type=float)
|
return _namedtuple('float_limits', ['significant_digits', 'max_exponent', 'min_exponent'])(sigdigits, maxexp,
-minexp)
|
Determines the precision and range of the given float type.
The passed in 'number_type' argument should refer to the type of
floating-point number. It should either be the built-in 'float',
    or a decimal context or constructor; i.e., one of:
# 1. FLOAT TYPE
determine_float_limits( float )
# 2. DEFAULT DECIMAL CONTEXT
determine_float_limits( decimal.Decimal )
# 3. CUSTOM DECIMAL CONTEXT
ctx = decimal.Context( prec=75 )
determine_float_limits( ctx )
Returns a named tuple with components:
( significant_digits,
max_exponent,
min_exponent )
Where:
* significant_digits -- maximum number of *decimal* digits
that can be represented without any loss of precision.
This is conservative, so if there are 16 1/2 digits, it
will return 16, not 17.
* max_exponent -- The maximum exponent (power of 10) that can
be represented before an overflow (or rounding to
infinity) occurs.
* min_exponent -- The minimum exponent (negative power of 10)
that can be represented before either an underflow
(rounding to zero) or a subnormal result (loss of
precision) occurs. Note this is conservative, as
subnormal numbers are excluded.
|
Determines the precision and range of the given float type.
| 75 | 214 |
def determine_float_limits(number_type=float):
"""Determines the precision and range of the given float type.
The passed in 'number_type' argument should refer to the type of
floating-point number. It should either be the built-in 'float',
    or a decimal context or constructor; i.e., one of:
# 1. FLOAT TYPE
determine_float_limits( float )
# 2. DEFAULT DECIMAL CONTEXT
determine_float_limits( decimal.Decimal )
# 3. CUSTOM DECIMAL CONTEXT
ctx = decimal.Context( prec=75 )
determine_float_limits( ctx )
Returns a named tuple with components:
( significant_digits,
max_exponent,
min_exponent )
Where:
* significant_digits -- maximum number of *decimal* digits
that can be represented without any loss of precision.
This is conservative, so if there are 16 1/2 digits, it
will return 16, not 17.
* max_exponent -- The maximum exponent (power of 10) that can
be represented before an overflow (or rounding to
infinity) occurs.
* min_exponent -- The minimum exponent (negative power of 10)
that can be represented before either an underflow
(rounding to zero) or a subnormal result (loss of
precision) occurs. Note this is conservative, as
subnormal numbers are excluded.
"""
if decimal:
numeric_exceptions = (ValueError, decimal.Overflow, decimal.Underflow)
else:
numeric_exceptions = (ValueError,)
if decimal and number_type == decimal.Decimal:
number_type = decimal.DefaultContext
if decimal and isinstance(number_type, decimal.Context):
# Passed a decimal Context, extract the bound creator function.
create_num = number_type.create_decimal
decimal_ctx = decimal.localcontext(number_type)
is_zero_or_subnormal = lambda n: n.is_zero() or n.is_subnormal()
elif number_type == float:
create_num = number_type
decimal_ctx = _dummy_context_manager
is_zero_or_subnormal = lambda n: n == 0
else:
raise TypeError("Expected a float type, e.g., float or decimal context")
with decimal_ctx:
zero = create_num('0.0')
        # Find significant digits by comparing floats of increasing
# number of digits, differing in the last digit only, until
# they numerically compare as being equal.
sigdigits = None
n = 0
while True:
n = n + 1
pfx = '0.' + '1' * n
a = create_num(pfx + '0')
for sfx in '123456789': # Check all possible last digits to
# avoid any partial-decimal.
b = create_num(pfx + sfx)
if (a + zero) == (b + zero):
sigdigits = n
break
if sigdigits:
break
# Find exponent limits. First find order of magnitude and
# then use a binary search to find the exact exponent.
base = '1.' + '1' * (sigdigits - 1)
base0 = '1.' + '1' * (sigdigits - 2)
minexp, maxexp = None, None
for expsign in ('+', '-'):
minv = 0;
maxv = 10
# First find order of magnitude of exponent limit
while True:
try:
s = base + 'e' + expsign + str(maxv)
s0 = base0 + 'e' + expsign + str(maxv)
f = create_num(s) + zero
f0 = create_num(s0) + zero
except numeric_exceptions:
f = None
if not f or not str(f)[0].isdigit() or is_zero_or_subnormal(f) or f == f0:
break
else:
minv = maxv
maxv = maxv * 10
# Now do a binary search to find exact limit
while True:
if minv + 1 == maxv:
if expsign == '+':
maxexp = minv
else:
minexp = minv
break
elif maxv < minv:
if expsign == '+':
maxexp = None
else:
minexp = None
break
m = (minv + maxv) // 2
try:
s = base + 'e' + expsign + str(m)
s0 = base0 + 'e' + expsign + str(m)
f = create_num(s) + zero
f0 = create_num(s0) + zero
except numeric_exceptions:
f = None
else:
if not f or not str(f)[0].isdigit():
f = None
elif is_zero_or_subnormal(f) or f == f0:
f = None
if not f:
# infinite
maxv = m
else:
minv = m
return _namedtuple('float_limits', ['significant_digits', 'max_exponent', 'min_exponent'])(sigdigits, maxexp,
-minexp)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L75-L214
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
44,
45,
47,
48,
49,
53,
54,
55,
56,
57,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
119,
120,
121,
122,
123,
124,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139
] | 89.285714 |
[
43,
46,
50,
51,
52,
58,
97,
98,
114,
115,
117,
118,
125,
126
] | 10 | false | 14.825334 | 140 | 31 | 90 | 37 |
def determine_float_limits(number_type=float):
if decimal:
numeric_exceptions = (ValueError, decimal.Overflow, decimal.Underflow)
else:
numeric_exceptions = (ValueError,)
if decimal and number_type == decimal.Decimal:
number_type = decimal.DefaultContext
if decimal and isinstance(number_type, decimal.Context):
# Passed a decimal Context, extract the bound creator function.
create_num = number_type.create_decimal
decimal_ctx = decimal.localcontext(number_type)
is_zero_or_subnormal = lambda n: n.is_zero() or n.is_subnormal()
elif number_type == float:
create_num = number_type
decimal_ctx = _dummy_context_manager
is_zero_or_subnormal = lambda n: n == 0
else:
raise TypeError("Expected a float type, e.g., float or decimal context")
with decimal_ctx:
zero = create_num('0.0')
        # Find significant digits by comparing floats of increasing
# number of digits, differing in the last digit only, until
# they numerically compare as being equal.
sigdigits = None
n = 0
while True:
n = n + 1
pfx = '0.' + '1' * n
a = create_num(pfx + '0')
for sfx in '123456789': # Check all possible last digits to
# avoid any partial-decimal.
b = create_num(pfx + sfx)
if (a + zero) == (b + zero):
sigdigits = n
break
if sigdigits:
break
# Find exponent limits. First find order of magnitude and
# then use a binary search to find the exact exponent.
base = '1.' + '1' * (sigdigits - 1)
base0 = '1.' + '1' * (sigdigits - 2)
minexp, maxexp = None, None
for expsign in ('+', '-'):
minv = 0;
maxv = 10
# First find order of magnitude of exponent limit
while True:
try:
s = base + 'e' + expsign + str(maxv)
s0 = base0 + 'e' + expsign + str(maxv)
f = create_num(s) + zero
f0 = create_num(s0) + zero
except numeric_exceptions:
f = None
if not f or not str(f)[0].isdigit() or is_zero_or_subnormal(f) or f == f0:
break
else:
minv = maxv
maxv = maxv * 10
# Now do a binary search to find exact limit
while True:
if minv + 1 == maxv:
if expsign == '+':
maxexp = minv
else:
minexp = minv
break
elif maxv < minv:
if expsign == '+':
maxexp = None
else:
minexp = None
break
m = (minv + maxv) // 2
try:
s = base + 'e' + expsign + str(m)
s0 = base0 + 'e' + expsign + str(m)
f = create_num(s) + zero
f0 = create_num(s0) + zero
except numeric_exceptions:
f = None
else:
if not f or not str(f)[0].isdigit():
f = None
elif is_zero_or_subnormal(f) or f == f0:
f = None
if not f:
# infinite
maxv = m
else:
minv = m
return _namedtuple('float_limits', ['significant_digits', 'max_exponent', 'min_exponent'])(sigdigits, maxexp,
-minexp)
| 18,356 |
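A small sketch of calling the limits probe above, both for the built-in float and for a custom decimal context. The exact numbers depend on the platform and context, so none are asserted here; an IEEE 754 double typically yields about 15 significant digits and exponents near ±308:

import decimal
from akshare.utils.demjson import determine_float_limits

float_limits = determine_float_limits(float)
print(float_limits.significant_digits, float_limits.max_exponent, float_limits.min_exponent)

# a widened Decimal context is probed the same way
ctx = decimal.Context(prec=75)
print(determine_float_limits(ctx))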
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
determine_float_precision
|
()
|
return (v.significant_digits, v.max_exponent)
| 221 | 223 |
def determine_float_precision():
v = determine_float_limits(float)
return (v.significant_digits, v.max_exponent)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L221-L223
| 25 |
[
0
] | 33.333333 |
[
1,
2
] | 66.666667 | false | 14.825334 | 3 | 1 | 33.333333 | 0 |
def determine_float_precision():
v = determine_float_limits(float)
return (v.significant_digits, v.max_exponent)
| 18,357 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_nonnumber_float_constants
|
()
|
return nan, inf, neginf
|
Try to return the NaN, Infinity, and -Infinity float values.
This is necessarily complex because there is no standard
platform-independent way to do this in Python as the language
    (as opposed to some implementation of it) doesn't discuss
non-numbers. We try various strategies from the best to the
worst.
If this Python interpreter uses the IEEE 754 floating point
standard then the returned values will probably be real instances
of the 'float' type. Otherwise a custom class object is returned
which will attempt to simulate the correct behavior as much as
possible.
|
Try to return the NaN, Infinity, and -Infinity float values.
This is necessarily complex because there is no standard
platform-independent way to do this in Python as the language
    (as opposed to some implementation of it) doesn't discuss
non-numbers. We try various strategies from the best to the
worst.
If this Python interpreter uses the IEEE 754 floating point
standard then the returned values will probably be real instances
of the 'float' type. Otherwise a custom class object is returned
which will attempt to simulate the correct behavior as much as
possible.
| 262 | 627 |
def _nonnumber_float_constants():
"""Try to return the Nan, Infinity, and -Infinity float values.
This is necessarily complex because there is no standard
platform-independent way to do this in Python as the language
    (as opposed to some implementation of it) doesn't discuss
non-numbers. We try various strategies from the best to the
worst.
If this Python interpreter uses the IEEE 754 floating point
standard then the returned values will probably be real instances
of the 'float' type. Otherwise a custom class object is returned
which will attempt to simulate the correct behavior as much as
possible.
"""
try:
# First, try (mostly portable) float constructor. Works under
# Linux x86 (gcc) and some Unices.
nan = float('nan')
inf = float('inf')
neginf = float('-inf')
except ValueError:
try:
# Try the AIX (PowerPC) float constructors
nan = float('NaNQ')
inf = float('INF')
neginf = float('-INF')
except ValueError:
try:
# Next, try binary unpacking. Should work under
# platforms using IEEE 754 floating point.
import struct, sys
xnan = '7ff8000000000000'.decode('hex') # Quiet NaN
xinf = '7ff0000000000000'.decode('hex')
xcheck = 'bdc145651592979d'.decode('hex') # -3.14159e-11
# Could use float.__getformat__, but it is a new python feature,
# so we use sys.byteorder.
if sys.byteorder == 'big':
nan = struct.unpack('d', xnan)[0]
inf = struct.unpack('d', xinf)[0]
check = struct.unpack('d', xcheck)[0]
else:
nan = struct.unpack('d', xnan[::-1])[0]
inf = struct.unpack('d', xinf[::-1])[0]
check = struct.unpack('d', xcheck[::-1])[0]
neginf = - inf
if check != -3.14159e-11:
raise ValueError('Unpacking raw IEEE 754 floats does not work')
except (ValueError, TypeError):
# Punt, make some fake classes to simulate. These are
# not perfect though. For instance nan * 1.0 == nan,
# as expected, but 1.0 * nan == 0.0, which is wrong.
class nan(float):
"""An approximation of the NaN (not a number) floating point number."""
def __repr__(self): return 'nan'
def __str__(self): return 'nan'
def __add__(self, x): return self
def __radd__(self, x): return self
def __sub__(self, x): return self
def __rsub__(self, x): return self
def __mul__(self, x): return self
def __rmul__(self, x): return self
def __div__(self, x): return self
def __rdiv__(self, x): return self
def __divmod__(self, x): return (self, self)
def __rdivmod__(self, x): return (self, self)
def __mod__(self, x): return self
def __rmod__(self, x): return self
def __pow__(self, exp): return self
def __rpow__(self, exp): return self
def __neg__(self): return self
def __pos__(self): return self
def __abs__(self): return self
def __lt__(self, x): return False
def __le__(self, x): return False
def __eq__(self, x): return False
def __neq__(self, x): return True
def __ge__(self, x): return False
def __gt__(self, x): return False
def __complex__(self, *a): raise NotImplementedError('NaN can not be converted to a complex')
if decimal:
nan = decimal.Decimal('NaN')
else:
nan = nan()
class inf(float):
"""An approximation of the +Infinity floating point number."""
def __repr__(self):
return 'inf'
def __str__(self):
return 'inf'
def __add__(self, x):
return self
def __radd__(self, x):
return self
def __sub__(self, x):
return self
def __rsub__(self, x):
return self
def __mul__(self, x):
if x is neginf or x < 0:
return neginf
elif x == 0:
return nan
else:
return self
def __rmul__(self, x):
return self.__mul__(x)
def __div__(self, x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return neginf
else:
return self
def __rdiv__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return 0.0
def __divmod__(self, x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan, nan)
else:
return (self, self)
def __rdivmod__(self, x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (0.0, x)
def __mod__(self, x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self, exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
if -1 < x < 1:
return 0.0
elif x == 1.0:
return 1.0
elif x is nan or x is neginf or x < 0:
return nan
else:
return self
def __neg__(self):
return neginf
def __pos__(self):
return self
def __abs__(self):
return self
def __lt__(self, x):
return False
def __le__(self, x):
if x is self:
return True
else:
return False
def __eq__(self, x):
if x is self:
return True
else:
return False
def __neq__(self, x):
if x is self:
return False
else:
return True
def __ge__(self, x):
return True
def __gt__(self, x):
return True
def __complex__(self, *a):
raise NotImplementedError('Infinity can not be converted to a complex')
if decimal:
inf = decimal.Decimal('Infinity')
else:
inf = inf()
class neginf(float):
"""An approximation of the -Infinity floating point number."""
def __repr__(self):
return '-inf'
def __str__(self):
return '-inf'
def __add__(self, x):
return self
def __radd__(self, x):
return self
def __sub__(self, x):
return self
def __rsub__(self, x):
return self
def __mul__(self, x):
if x is self or x < 0:
return inf
elif x == 0:
return nan
else:
return self
def __rmul__(self, x):
                        return self.__mul__(x)
def __div__(self, x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return inf
else:
return self
def __rdiv__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return -0.0
def __divmod__(self, x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan, nan)
else:
return (self, self)
def __rdivmod__(self, x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (-0.0, x)
def __mod__(self, x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self, exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
                        if x is nan or x is inf or x is neginf:
return nan
return 0.0
def __neg__(self):
return inf
def __pos__(self):
return self
def __abs__(self):
return inf
def __lt__(self, x):
return True
def __le__(self, x):
return True
def __eq__(self, x):
if x is self:
return True
else:
return False
def __neq__(self, x):
if x is self:
return False
else:
return True
def __ge__(self, x):
if x is self:
return True
else:
return False
def __gt__(self, x):
return False
def __complex__(self, *a):
raise NotImplementedError('-Infinity can not be converted to a complex')
if decimal:
neginf = decimal.Decimal('-Infinity')
else:
neginf = neginf(0)
return nan, inf, neginf
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L262-L627
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
365
] | 6.284153 |
[
22,
23,
25,
26,
27,
28,
29,
32,
33,
34,
35,
38,
39,
40,
41,
43,
44,
45,
46,
47,
48,
49,
53,
56,
58,
60,
62,
64,
66,
68,
70,
72,
74,
76,
78,
80,
82,
84,
86,
88,
90,
92,
94,
96,
98,
100,
102,
104,
106,
108,
109,
111,
113,
116,
117,
119,
120,
122,
123,
125,
126,
128,
129,
131,
132,
134,
135,
136,
137,
138,
140,
142,
143,
145,
146,
147,
148,
149,
151,
153,
154,
155,
156,
158,
159,
160,
161,
162,
164,
166,
167,
168,
169,
171,
172,
173,
175,
177,
178,
179,
180,
182,
183,
184,
186,
188,
189,
190,
191,
192,
193,
194,
196,
198,
199,
201,
202,
204,
205,
207,
208,
210,
211,
212,
214,
216,
217,
218,
220,
222,
223,
224,
226,
228,
229,
231,
232,
234,
235,
237,
238,
240,
242,
245,
246,
248,
249,
251,
252,
254,
255,
257,
258,
260,
261,
263,
264,
265,
266,
267,
269,
271,
272,
274,
275,
276,
277,
278,
280,
282,
283,
284,
285,
287,
288,
289,
290,
291,
293,
295,
296,
297,
298,
300,
301,
302,
304,
306,
307,
308,
309,
311,
312,
313,
315,
317,
318,
319,
320,
322,
323,
325,
326,
328,
329,
331,
332,
334,
335,
337,
338,
339,
341,
343,
344,
345,
347,
349,
350,
351,
353,
355,
356,
358,
359,
361,
362,
364
] | 62.295082 | false | 14.825334 | 366 | 137 | 37.704918 | 13 |
def _nonnumber_float_constants():
try:
# First, try (mostly portable) float constructor. Works under
# Linux x86 (gcc) and some Unices.
nan = float('nan')
inf = float('inf')
neginf = float('-inf')
except ValueError:
try:
# Try the AIX (PowerPC) float constructors
nan = float('NaNQ')
inf = float('INF')
neginf = float('-INF')
except ValueError:
try:
# Next, try binary unpacking. Should work under
# platforms using IEEE 754 floating point.
import struct, sys
xnan = '7ff8000000000000'.decode('hex') # Quiet NaN
xinf = '7ff0000000000000'.decode('hex')
xcheck = 'bdc145651592979d'.decode('hex') # -3.14159e-11
# Could use float.__getformat__, but it is a new python feature,
# so we use sys.byteorder.
if sys.byteorder == 'big':
nan = struct.unpack('d', xnan)[0]
inf = struct.unpack('d', xinf)[0]
check = struct.unpack('d', xcheck)[0]
else:
nan = struct.unpack('d', xnan[::-1])[0]
inf = struct.unpack('d', xinf[::-1])[0]
check = struct.unpack('d', xcheck[::-1])[0]
neginf = - inf
if check != -3.14159e-11:
raise ValueError('Unpacking raw IEEE 754 floats does not work')
except (ValueError, TypeError):
# Punt, make some fake classes to simulate. These are
# not perfect though. For instance nan * 1.0 == nan,
# as expected, but 1.0 * nan == 0.0, which is wrong.
class nan(float):
def __repr__(self): return 'nan'
def __str__(self): return 'nan'
def __add__(self, x): return self
def __radd__(self, x): return self
def __sub__(self, x): return self
def __rsub__(self, x): return self
def __mul__(self, x): return self
def __rmul__(self, x): return self
def __div__(self, x): return self
def __rdiv__(self, x): return self
def __divmod__(self, x): return (self, self)
def __rdivmod__(self, x): return (self, self)
def __mod__(self, x): return self
def __rmod__(self, x): return self
def __pow__(self, exp): return self
def __rpow__(self, exp): return self
def __neg__(self): return self
def __pos__(self): return self
def __abs__(self): return self
def __lt__(self, x): return False
def __le__(self, x): return False
def __eq__(self, x): return False
def __neq__(self, x): return True
def __ge__(self, x): return False
def __gt__(self, x): return False
def __complex__(self, *a): raise NotImplementedError('NaN can not be converted to a complex')
if decimal:
nan = decimal.Decimal('NaN')
else:
nan = nan()
class inf(float):
def __repr__(self):
return 'inf'
def __str__(self):
return 'inf'
def __add__(self, x):
return self
def __radd__(self, x):
return self
def __sub__(self, x):
return self
def __rsub__(self, x):
return self
def __mul__(self, x):
if x is neginf or x < 0:
return neginf
elif x == 0:
return nan
else:
return self
def __rmul__(self, x):
return self.__mul__(x)
def __div__(self, x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return neginf
else:
return self
def __rdiv__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return 0.0
def __divmod__(self, x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan, nan)
else:
return (self, self)
def __rdivmod__(self, x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (0.0, x)
def __mod__(self, x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self, exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
if -1 < x < 1:
return 0.0
elif x == 1.0:
return 1.0
elif x is nan or x is neginf or x < 0:
return nan
else:
return self
def __neg__(self):
return neginf
def __pos__(self):
return self
def __abs__(self):
return self
def __lt__(self, x):
return False
def __le__(self, x):
if x is self:
return True
else:
return False
def __eq__(self, x):
if x is self:
return True
else:
return False
def __neq__(self, x):
if x is self:
return False
else:
return True
def __ge__(self, x):
return True
def __gt__(self, x):
return True
def __complex__(self, *a):
raise NotImplementedError('Infinity can not be converted to a complex')
if decimal:
inf = decimal.Decimal('Infinity')
else:
inf = inf()
class neginf(float):
def __repr__(self):
return '-inf'
def __str__(self):
return '-inf'
def __add__(self, x):
return self
def __radd__(self, x):
return self
def __sub__(self, x):
return self
def __rsub__(self, x):
return self
def __mul__(self, x):
if x is self or x < 0:
return inf
elif x == 0:
return nan
else:
return self
def __rmul__(self, x):
                        return self.__mul__(x)
def __div__(self, x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return inf
else:
return self
def __rdiv__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return -0.0
def __divmod__(self, x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan, nan)
else:
return (self, self)
def __rdivmod__(self, x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (-0.0, x)
def __mod__(self, x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self, x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self, exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
                        if x is nan or x is inf or x is neginf:
return nan
return 0.0
def __neg__(self):
return inf
def __pos__(self):
return self
def __abs__(self):
return inf
def __lt__(self, x):
return True
def __le__(self, x):
return True
def __eq__(self, x):
if x is self:
return True
else:
return False
def __neq__(self, x):
if x is self:
return False
else:
return True
def __ge__(self, x):
if x is self:
return True
else:
return False
def __gt__(self, x):
return False
def __complex__(self, *a):
raise NotImplementedError('-Infinity can not be converted to a complex')
if decimal:
neginf = decimal.Decimal('-Infinity')
else:
neginf = neginf(0)
return nan, inf, neginf
| 18,358 |
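On a modern CPython the very first strategy above (float('nan'), float('inf'), float('-inf')) succeeds, so the fallback classes are never built; a quick sketch of the behaviour of the returned triple, calling the private helper directly purely for illustration:

from akshare.utils import demjson

nan, inf, neginf = demjson._nonnumber_float_constants()
print(nan != nan)      # NaN never compares equal to itself
print(inf > 1e308)     # positive infinity is above any finite float
print(neginf == -inf)  # negative infinity mirrors it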
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
skipstringsafe
|
(s, start=0, end=None)
|
return i
| 689 | 699 |
def skipstringsafe(s, start=0, end=None):
i = start
# if end is None:
# end = len(s)
unsafe = helpers.unsafe_string_chars
while i < end and s[i] not in unsafe:
# c = s[i]
# if c in unsafe_string_chars:
# break
i += 1
return i
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L689-L699
| 25 |
[
0
] | 9.090909 |
[
1,
4,
5,
9,
10
] | 45.454545 | false | 14.825334 | 11 | 3 | 54.545455 | 0 |
def skipstringsafe(s, start=0, end=None):
i = start
# if end is None:
# end = len(s)
unsafe = helpers.unsafe_string_chars
while i < end and s[i] not in unsafe:
# c = s[i]
# if c in unsafe_string_chars:
# break
i += 1
return i
| 18,359 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
skipstringsafe_slow
|
(s, start=0, end=None)
|
return i
| 702 | 711 |
def skipstringsafe_slow(s, start=0, end=None):
i = start
if end is None:
end = len(s)
while i < end:
c = s[i]
if c == '"' or c == "'" or c == '\\' or ord(c) <= 0x1f:
break
i += 1
return i
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L702-L711
| 25 |
[
0
] | 10 |
[
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 90 | false | 14.825334 | 10 | 7 | 10 | 0 |
def skipstringsafe_slow(s, start=0, end=None):
i = start
if end is None:
end = len(s)
while i < end:
c = s[i]
if c == '"' or c == "'" or c == '\\' or ord(c) <= 0x1f:
break
i += 1
return i
| 18,360 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
extend_list_with_sep
|
(orig_seq, extension_seq, sepchar='')
| 714 | 721 |
def extend_list_with_sep(orig_seq, extension_seq, sepchar=''):
if not sepchar:
orig_seq.extend(extension_seq)
else:
for i, x in enumerate(extension_seq):
if i > 0:
orig_seq.append(sepchar)
orig_seq.append(x)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L714-L721
| 25 |
[
0
] | 12.5 |
[
1,
2,
4,
5,
6,
7
] | 75 | false | 14.825334 | 8 | 4 | 25 | 0 |
def extend_list_with_sep(orig_seq, extension_seq, sepchar=''):
if not sepchar:
orig_seq.extend(extension_seq)
else:
for i, x in enumerate(extension_seq):
if i > 0:
orig_seq.append(sepchar)
orig_seq.append(x)
| 18,361 |
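A tiny illustration of the separator-aware list extension above; the values are made up for the example. The separator is inserted only between the newly appended items, never before the first one:

from akshare.utils import demjson

parts = ["{"]
demjson.extend_list_with_sep(parts, ["a", "b", "c"], sepchar=",")
print(parts)  # ['{', 'a', ',', 'b', ',', 'c']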
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
extend_and_flatten_list_with_sep
|
(orig_seq, extension_seq, separator='')
| 724 | 728 |
def extend_and_flatten_list_with_sep(orig_seq, extension_seq, separator=''):
for i, part in enumerate(extension_seq):
if i > 0 and separator:
orig_seq.append(separator)
orig_seq.extend(part)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L724-L728
| 25 |
[
0
] | 20 |
[
1,
2,
3,
4
] | 80 | false | 14.825334 | 5 | 4 | 20 | 0 |
def extend_and_flatten_list_with_sep(orig_seq, extension_seq, separator=''):
for i, part in enumerate(extension_seq):
if i > 0 and separator:
orig_seq.append(separator)
orig_seq.extend(part)
| 18,362 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_make_raw_bytes
|
(byte_list)
|
return b
|
Takes a list of byte values (numbers) and returns a bytes (Python 3) or string (Python 2)
|
Takes a list of byte values (numbers) and returns a bytes (Python 3) or string (Python 2)
| 735 | 742 |
def _make_raw_bytes(byte_list):
"""Takes a list of byte values (numbers) and returns a bytes (Python 3) or string (Python 2)
"""
if _py_major >= 3:
b = bytes(byte_list)
else:
b = ''.join(chr(n) for n in byte_list)
return b
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L735-L742
| 25 |
[
0,
1,
2,
3,
4,
5,
7
] | 87.5 |
[
6
] | 12.5 | false | 14.825334 | 8 | 2 | 87.5 | 1 |
def _make_raw_bytes(byte_list):
if _py_major >= 3:
b = bytes(byte_list)
else:
b = ''.join(chr(n) for n in byte_list)
return b
| 18,363 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_make_unsafe_string_chars
|
()
|
return ''.join(unsafe)
| 962 | 969 |
def _make_unsafe_string_chars():
import unicodedata
unsafe = []
for c in [chr(i) for i in range(0x100)]:
if c == '"' or c == '\\' \
or unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
unsafe.append(c)
return ''.join(unsafe)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L962-L969
| 25 |
[
0,
1,
2,
3,
4,
6,
7
] | 87.5 |
[] | 0 | false | 14.825334 | 8 | 6 | 100 | 0 |
def _make_unsafe_string_chars():
import unicodedata
unsafe = []
for c in [chr(i) for i in range(0x100)]:
if c == '"' or c == '\\' \
or unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
unsafe.append(c)
return ''.join(unsafe)
| 18,364 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
smart_sort_transform
|
(key)
|
return key
| 2,820 | 2,846 |
def smart_sort_transform(key):
numfmt = '%012d'
digits = '0123456789'
zero = ord('0')
if not key:
key = ''
elif isinstance(key, int):
key = numfmt % key
elif isinstance(key, str):
keylen = len(key)
words = []
i = 0
while i < keylen:
if key[i] in digits:
num = 0
while i < keylen and key[i] in digits:
num *= 10
num += ord(key[i]) - zero
i += 1
words.append(numfmt % num)
else:
words.append(key[i].upper())
i += 1
key = ''.join(words)
else:
key = str(key)
return key
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L2820-L2846
| 25 |
[
0
] | 3.703704 |
[
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
21,
22,
23,
25,
26
] | 88.888889 | false | 14.825334 | 27 | 8 | 11.111111 | 0 |
def smart_sort_transform(key):
numfmt = '%012d'
digits = '0123456789'
zero = ord('0')
if not key:
key = ''
elif isinstance(key, int):
key = numfmt % key
elif isinstance(key, str):
keylen = len(key)
words = []
i = 0
while i < keylen:
if key[i] in digits:
num = 0
while i < keylen and key[i] in digits:
num *= 10
num += ord(key[i]) - zero
i += 1
words.append(numfmt % num)
else:
words.append(key[i].upper())
i += 1
key = ''.join(words)
else:
key = str(key)
return key
| 18,365 |
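The key function above zero-pads any run of digits to 12 characters and uppercases letters, which gives a natural ("smart") ordering when passed to sorted(); a short sketch with made-up keys:

from akshare.utils import demjson

keys = ["item10", "item2", "item1", 42]
print(sorted(keys, key=demjson.smart_sort_transform))
# -> [42, 'item1', 'item2', 'item10']  (embedded numbers sort numerically, not lexically)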
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
encode
|
(obj, encoding=None, **kwargs)
|
return output
|
r"""Encodes a Python object into a JSON-encoded string.
* 'strict' (Boolean, default False)
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
    of values may not be convertible and will result in a
JSONEncodeError exception.
* 'compactly' (Boolean, default True)
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and
indentation added to make it more readable.
* 'encode_namedtuple_as_object' (Boolean or callable, default True)
If True, then objects of type namedtuple, or subclasses of
'tuple' that have an _asdict() method, will be encoded as an
object rather than an array.
    It can also be a predicate function that takes a namedtuple
object as an argument and returns True or False.
* 'indent_amount' (Integer, default 2)
The number of spaces to output for each indentation level.
If 'compactly' is True then indentation is ignored.
* 'indent_limit' (Integer or None, default None)
If not None, then this is the maximum limit of indentation
levels, after which further indentation spaces are not
inserted. If None, then there is no limit.
CONCERNING CHARACTER ENCODING:
The 'encoding' argument should be one of:
* None - The return will be a Unicode string.
* encoding_name - A string which is the name of a known
encoding, such as 'UTF-8' or 'ascii'.
    * codec - A CodecInfo object, such as found by codecs.lookup().
This allows you to use a custom codec as well as those
built into Python.
If an encoding is given (either by name or by codec), then the
returned value will be a byte array (Python 3), or a 'str' string
(Python 2); which represents the raw set of bytes. Otherwise,
if encoding is None, then the returned value will be a Unicode
string.
The 'escape_unicode' argument is used to determine which characters
in string literals must be \u escaped. Should be one of:
* True -- All non-ASCII characters are always \u escaped.
* False -- Try to insert actual Unicode characters if possible.
* function -- A user-supplied function that accepts a single
unicode character and returns True or False; where True
means to \u escape that character.
Regardless of escape_unicode, certain characters will always be
    \u escaped. Additionally any characters not in the output encoding
repertoire for the encoding codec will be \u escaped as well.
|
r"""Encodes a Python object into a JSON-encoded string.
| 5,530 | 5,600 |
def encode(obj, encoding=None, **kwargs):
r"""Encodes a Python object into a JSON-encoded string.
* 'strict' (Boolean, default False)
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
    of values may not be convertible and will result in a
JSONEncodeError exception.
* 'compactly' (Boolean, default True)
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and
indentation added to make it more readable.
* 'encode_namedtuple_as_object' (Boolean or callable, default True)
If True, then objects of type namedtuple, or subclasses of
'tuple' that have an _asdict() method, will be encoded as an
object rather than an array.
    It can also be a predicate function that takes a namedtuple
object as an argument and returns True or False.
* 'indent_amount' (Integer, default 2)
The number of spaces to output for each indentation level.
If 'compactly' is True then indentation is ignored.
* 'indent_limit' (Integer or None, default None)
If not None, then this is the maximum limit of indentation
levels, after which further indentation spaces are not
inserted. If None, then there is no limit.
CONCERNING CHARACTER ENCODING:
The 'encoding' argument should be one of:
* None - The return will be a Unicode string.
* encoding_name - A string which is the name of a known
encoding, such as 'UTF-8' or 'ascii'.
    * codec - A CodecInfo object, such as found by codecs.lookup().
This allows you to use a custom codec as well as those
built into Python.
If an encoding is given (either by name or by codec), then the
returned value will be a byte array (Python 3), or a 'str' string
(Python 2); which represents the raw set of bytes. Otherwise,
if encoding is None, then the returned value will be a Unicode
string.
The 'escape_unicode' argument is used to determine which characters
in string literals must be \u escaped. Should be one of:
* True -- All non-ASCII characters are always \u escaped.
* False -- Try to insert actual Unicode characters if possible.
* function -- A user-supplied function that accepts a single
unicode character and returns True or False; where True
means to \u escape that character.
Regardless of escape_unicode, certain characters will always be
    \u escaped. Additionally any characters not in the output encoding
repertoire for the encoding codec will be \u escaped as well.
"""
# Do the JSON encoding
j = JSON(**kwargs)
output = j.encode(obj, encoding)
return output
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5530-L5600
| 25 |
[
0
] | 1.408451 |
[
68,
69,
70
] | 4.225352 | false | 14.825334 | 71 | 1 | 95.774648 | 64 |
def encode(obj, encoding=None, **kwargs):
r"""Encodes a Python object into a JSON-encoded string.
* 'strict' (Boolean, default False)
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
of values may not be convertable and will result in a
JSONEncodeError exception.
* 'compactly' (Boolean, default True)
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and
indentation added to make it more readable.
* 'encode_namedtuple_as_object' (Boolean or callable, default True)
If True, then objects of type namedtuple, or subclasses of
'tuple' that have an _asdict() method, will be encoded as an
object rather than an array.
If can also be a predicate function that takes a namedtuple
object as an argument and returns True or False.
* 'indent_amount' (Integer, default 2)
The number of spaces to output for each indentation level.
If 'compactly' is True then indentation is ignored.
* 'indent_limit' (Integer or None, default None)
If not None, then this is the maximum limit of indentation
levels, after which further indentation spaces are not
inserted. If None, then there is no limit.
CONCERNING CHARACTER ENCODING:
The 'encoding' argument should be one of:
* None - The return will be a Unicode string.
* encoding_name - A string which is the name of a known
encoding, such as 'UTF-8' or 'ascii'.
* codec - A CodecInfo object, such as as found by codecs.lookup().
This allows you to use a custom codec as well as those
built into Python.
If an encoding is given (either by name or by codec), then the
returned value will be a byte array (Python 3), or a 'str' string
(Python 2); which represents the raw set of bytes. Otherwise,
if encoding is None, then the returned value will be a Unicode
string.
The 'escape_unicode' argument is used to determine which characters
in string literals must be \u escaped. Should be one of:
* True -- All non-ASCII characters are always \u escaped.
* False -- Try to insert actual Unicode characters if possible.
* function -- A user-supplied function that accepts a single
unicode character and returns True or False; where True
means to \u escape that character.
Regardless of escape_unicode, certain characters will always be
\u escaped. Additionaly any characters not in the output encoding
repertoire for the encoding codec will be \u escaped as well.
"""
# Do the JSON encoding
j = JSON(**kwargs)
output = j.encode(obj, encoding)
return output
| 18,366 |
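A hedged usage sketch for encode() above: with encoding=None the result is a unicode string, with an encoding name it is a byte string, and compactly=False switches on pretty printing, as described in the docstring:

from akshare.utils import demjson

doc = {"symbol": "399001", "values": [1, 2.5, None, True]}

text = demjson.encode(doc)                     # compact text output
pretty = demjson.encode(doc, compactly=False)  # indented, human readable
raw = demjson.encode(doc, encoding="utf-8")    # raw bytes, ready to write to a file
print(type(text), type(raw))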
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
decode
|
(txt, encoding=None, **kwargs)
|
return result
|
Decodes a JSON-encoded string into a Python object.
== Optional arguments ==
* 'encoding' (string, default None)
This argument provides a hint regarding the character encoding
that the input text is assumed to be in (if it is not already a
unicode string type).
If set to None then autodetection of the encoding is attempted
(see discussion above). Otherwise this argument should be the
name of a registered codec (see the standard 'codecs' module).
* 'strict' (Boolean, default False)
If 'strict' is set to True, then those strings that are not
entirely strictly conforming to JSON will result in a
JSONDecodeError exception.
* 'return_errors' (Boolean, default False)
Controls the return value from this function. If False, then
only the Python equivalent object is returned on success, or
an error will be raised as an exception.
If True then a 2-tuple is returned: (object, error_list). The
error_list will be an empty list [] if the decoding was
successful, otherwise it will be a list of all the errors
encountered. Note that it is possible for an object to be
returned even if errors were encountered.
* 'return_stats' (Boolean, default False)
Controls whether statistics about the decoded JSON document
    are returned (an instance of decode_statistics).
If True, then the stats object will be added to the end of the
tuple returned. If return_errors is also set then a 3-tuple
is returned, otherwise a 2-tuple is returned.
* 'write_errors' (Boolean OR File-like object, default False)
Controls what to do with errors.
- If False, then the first decoding error is raised as an exception.
- If True, then errors will be printed out to sys.stderr.
- If a File-like object, then errors will be printed to that file.
The write_errors and return_errors arguments can be set
independently.
* 'filename_for_errors' (string or None)
    Provides a filename to be used when writing error messages.
* 'allow_xxx', 'warn_xxx', and 'forbid_xxx' (Booleans)
These arguments allow for fine-adjustments to be made to the
'strict' argument, by allowing or forbidding specific
syntaxes.
There are many of these arguments, named by replacing the
"xxx" with any number of possible behavior names (See the JSON
class for more details).
Each of these will allow (or forbid) the specific behavior,
after the evaluation of the 'strict' argument. For example,
if strict=True then by also passing 'allow_comments=True' then
comments will be allowed. If strict=False then
forbid_comments=True will allow everything except comments.
Unicode decoding:
-----------------
The input string can be either a python string or a python unicode
string (or a byte array in Python 3). If it is already a unicode
string, then it is assumed that no character set decoding is
required.
However, if you pass in a non-Unicode text string (a Python 2
'str' type or a Python 3 'bytes' or 'bytearray') then an attempt
will be made to auto-detect and decode the character encoding.
This will be successful if the input was encoded in any of UTF-8,
UTF-16 (BE or LE), or UTF-32 (BE or LE), and of course plain ASCII
works too.
Note though that if you know the character encoding, then you
should convert to a unicode string yourself, or pass it the name
of the 'encoding' to avoid the guessing made by the auto
detection, as with
python_object = demjson.decode( input_bytes, encoding='utf8' )
Callback hooks:
---------------
You may supply callback hooks by using the hook name as the
named argument, such as:
decode_float=decimal.Decimal
See the hooks documentation on the JSON.set_hook() method.
|
Decodes a JSON-encoded string into a Python object.
| 5,603 | 5,760 |
def decode(txt, encoding=None, **kwargs):
"""Decodes a JSON-encoded string into a Python object.
== Optional arguments ==
* 'encoding' (string, default None)
This argument provides a hint regarding the character encoding
that the input text is assumed to be in (if it is not already a
unicode string type).
If set to None then autodetection of the encoding is attempted
(see discussion above). Otherwise this argument should be the
name of a registered codec (see the standard 'codecs' module).
* 'strict' (Boolean, default False)
If 'strict' is set to True, then those strings that are not
entirely strictly conforming to JSON will result in a
JSONDecodeError exception.
* 'return_errors' (Boolean, default False)
Controls the return value from this function. If False, then
only the Python equivalent object is returned on success, or
an error will be raised as an exception.
If True then a 2-tuple is returned: (object, error_list). The
error_list will be an empty list [] if the decoding was
successful, otherwise it will be a list of all the errors
encountered. Note that it is possible for an object to be
returned even if errors were encountered.
* 'return_stats' (Boolean, default False)
Controls whether statistics about the decoded JSON document
    are returned (an instance of decode_statistics).
If True, then the stats object will be added to the end of the
tuple returned. If return_errors is also set then a 3-tuple
is returned, otherwise a 2-tuple is returned.
* 'write_errors' (Boolean OR File-like object, default False)
Controls what to do with errors.
- If False, then the first decoding error is raised as an exception.
- If True, then errors will be printed out to sys.stderr.
- If a File-like object, then errors will be printed to that file.
The write_errors and return_errors arguments can be set
independently.
* 'filename_for_errors' (string or None)
    Provides a filename to be used when writing error messages.
* 'allow_xxx', 'warn_xxx', and 'forbid_xxx' (Booleans)
These arguments allow for fine-adjustments to be made to the
'strict' argument, by allowing or forbidding specific
syntaxes.
There are many of these arguments, named by replacing the
"xxx" with any number of possible behavior names (See the JSON
class for more details).
Each of these will allow (or forbid) the specific behavior,
after the evaluation of the 'strict' argument. For example,
if strict=True then by also passing 'allow_comments=True' then
comments will be allowed. If strict=False then
forbid_comments=True will allow everything except comments.
Unicode decoding:
-----------------
The input string can be either a python string or a python unicode
string (or a byte array in Python 3). If it is already a unicode
string, then it is assumed that no character set decoding is
required.
However, if you pass in a non-Unicode text string (a Python 2
'str' type or a Python 3 'bytes' or 'bytearray') then an attempt
will be made to auto-detect and decode the character encoding.
This will be successful if the input was encoded in any of UTF-8,
UTF-16 (BE or LE), or UTF-32 (BE or LE), and of course plain ASCII
works too.
Note though that if you know the character encoding, then you
should convert to a unicode string yourself, or pass it the name
of the 'encoding' to avoid the guessing made by the auto
detection, as with
python_object = demjson.decode( input_bytes, encoding='utf8' )
Callback hooks:
---------------
You may supply callback hooks by using the hook name as the
named argument, such as:
decode_float=decimal.Decimal
See the hooks documentation on the JSON.set_hook() method.
"""
import sys
# Initialize the JSON object
return_errors = False
return_stats = False
write_errors = False
filename_for_errors = None
write_stats = False
kwargs = kwargs.copy()
todel = []
for kw, val in list(kwargs.items()):
if kw == "return_errors":
return_errors = bool(val)
todel.append(kw)
elif kw == 'return_stats':
return_stats = bool(val)
todel.append(kw)
elif kw == "write_errors":
write_errors = val
todel.append(kw)
elif kw == "filename_for_errors":
filename_for_errors = val
todel.append(kw)
elif kw == "write_stats":
write_stats = val
todel.append(kw)
# next keyword argument
for kw in todel:
del kwargs[kw]
j = JSON(**kwargs)
# Now do the actual JSON decoding
result = j.decode(txt,
encoding=encoding,
return_errors=(return_errors or write_errors),
return_stats=(return_stats or write_stats))
if write_errors:
import sys
if write_errors is True:
write_errors = sys.stderr
for err in result.errors:
write_errors.write(err.pretty_description(filename=filename_for_errors) + "\n")
if write_stats:
import sys
if write_stats is True:
write_stats = sys.stderr
if result.stats:
write_stats.write("%s----- Begin JSON statistics\n" % filename_for_errors)
write_stats.write(result.stats.pretty_description(prefix=" | "))
write_stats.write("%s----- End of JSON statistics\n" % filename_for_errors)
return result
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5603-L5760
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102
] | 65.189873 |
[
103,
105,
106,
107,
108,
109,
111,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
131,
132,
134,
137,
142,
143,
144,
145,
146,
147,
149,
150,
151,
152,
153,
154,
155,
156,
157
] | 27.21519 | false | 14.825334 | 158 | 16 | 72.78481 | 100 |
def decode(txt, encoding=None, **kwargs):
import sys
# Initialize the JSON object
return_errors = False
return_stats = False
write_errors = False
filename_for_errors = None
write_stats = False
kwargs = kwargs.copy()
todel = []
for kw, val in list(kwargs.items()):
if kw == "return_errors":
return_errors = bool(val)
todel.append(kw)
elif kw == 'return_stats':
return_stats = bool(val)
todel.append(kw)
elif kw == "write_errors":
write_errors = val
todel.append(kw)
elif kw == "filename_for_errors":
filename_for_errors = val
todel.append(kw)
elif kw == "write_stats":
write_stats = val
todel.append(kw)
# next keyword argument
for kw in todel:
del kwargs[kw]
j = JSON(**kwargs)
# Now do the actual JSON decoding
result = j.decode(txt,
encoding=encoding,
return_errors=(return_errors or write_errors),
return_stats=(return_stats or write_stats))
if write_errors:
import sys
if write_errors is True:
write_errors = sys.stderr
for err in result.errors:
write_errors.write(err.pretty_description(filename=filename_for_errors) + "\n")
if write_stats:
import sys
if write_stats is True:
write_stats = sys.stderr
if result.stats:
write_stats.write("%s----- Begin JSON statistics\n" % filename_for_errors)
write_stats.write(result.stats.pretty_description(prefix=" | "))
write_stats.write("%s----- End of JSON statistics\n" % filename_for_errors)
return result
| 18,367 |
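A sketch of decode() above in its simplest form, plus the decode_float hook mentioned at the end of the docstring, which routes every JSON float through decimal.Decimal:

import decimal
from akshare.utils import demjson

obj = demjson.decode('{"a": 1, "flags": [true, null]}')
print(obj)  # {'a': 1, 'flags': [True, None]}

prices = demjson.decode('{"px": 12.34}', decode_float=decimal.Decimal)
print(type(prices["px"]))  # expected: decimal.Decimal, per the hook documentation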
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
encode_to_file
|
(filename, obj, encoding='utf-8', overwrite=False, **kwargs)
|
Encodes a Python object into JSON and writes into the given file.
If no encoding is given, then UTF-8 will be used.
See the encode() function for a description of other possible options.
If the file already exists and the 'overwrite' option is not set
to True, then the existing file will not be overwritten. (Note,
there is a subtle race condition in the check so there are
possible conditions in which a file may be overwritten)
|
Encodes a Python object into JSON and writes into the given file.
| 5,763 | 5,796 |
def encode_to_file(filename, obj, encoding='utf-8', overwrite=False, **kwargs):
"""Encodes a Python object into JSON and writes into the given file.
If no encoding is given, then UTF-8 will be used.
See the encode() function for a description of other possible options.
If the file already exists and the 'overwrite' option is not set
to True, then the existing file will not be overwritten. (Note,
there is a subtle race condition in the check so there are
possible conditions in which a file may be overwritten)
"""
import os, errno
if not encoding:
encoding = 'utf-8'
if not isinstance(filename, str) or not filename:
raise TypeError("Expected a file name")
if not overwrite and os.path.exists(filename):
raise IOError(errno.EEXIST, "File exists: %r" % filename)
jsondata = encode(obj, encoding=encoding, **kwargs)
try:
fp = open(filename, 'wb')
except Exception:
raise
else:
try:
fp.write(jsondata)
finally:
fp.close()
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5763-L5796
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 38.235294 |
[
13,
14,
15,
17,
18,
20,
21,
23,
25,
26,
27,
28,
30,
31,
33
] | 44.117647 | false | 14.825334 | 34 | 7 | 55.882353 | 10 |
def encode_to_file(filename, obj, encoding='utf-8', overwrite=False, **kwargs):
import os, errno
if not encoding:
encoding = 'utf-8'
if not isinstance(filename, str) or not filename:
raise TypeError("Expected a file name")
if not overwrite and os.path.exists(filename):
raise IOError(errno.EEXIST, "File exists: %r" % filename)
jsondata = encode(obj, encoding=encoding, **kwargs)
try:
fp = open(filename, 'wb')
except Exception:
raise
else:
try:
fp.write(jsondata)
finally:
fp.close()
| 18,368 |
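A short sketch of the file writer above; the filename is made up for the example, and overwrite=True bypasses the EEXIST guard described in the docstring:

from akshare.utils import demjson

demjson.encode_to_file("index_snapshot.json",  # hypothetical output path
                       {"symbol": "399005", "weight": 1.5},
                       encoding="utf-8", overwrite=True)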
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
decode_file
|
(filename, encoding=None, **kwargs)
|
return decode(jsondata, encoding=encoding, **kwargs)
|
Decodes JSON found in the given file.
See the decode() function for a description of other possible options.
|
Decodes JSON found in the given file.
| 5,799 | 5,817 |
def decode_file(filename, encoding=None, **kwargs):
"""Decodes JSON found in the given file.
See the decode() function for a description of other possible options.
"""
if isinstance(filename, str):
try:
fp = open(filename, 'rb')
except Exception:
raise
else:
try:
jsondata = fp.read()
finally:
fp.close()
else:
raise TypeError("Expected a file name")
return decode(jsondata, encoding=encoding, **kwargs)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5799-L5817
| 25 |
[
0,
1,
2,
3,
4,
5
] | 31.578947 |
[
6,
7,
8,
9,
10,
12,
13,
15,
17,
18
] | 52.631579 | false | 14.825334 | 19 | 3 | 47.368421 | 3 |
def decode_file(filename, encoding=None, **kwargs):
if isinstance(filename, str):
try:
fp = open(filename, 'rb')
except Exception:
raise
else:
try:
jsondata = fp.read()
finally:
fp.close()
else:
raise TypeError("Expected a file name")
return decode(jsondata, encoding=encoding, **kwargs)
| 18,369 |
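A minimal round-trip sketch for the two file helpers recorded above (encode_to_file and decode_file). It assumes the module is importable as akshare.utils.demjson, taken from the record's path field; the temporary file name and sample object are purely illustrative.
import os
import tempfile

from akshare.utils import demjson  # import path assumed from the record's path field

data = {"name": "demjson", "values": [1, 2, 3]}
path = os.path.join(tempfile.gettempdir(), "demjson_roundtrip.json")  # illustrative path

# Serialize to UTF-8 JSON on disk; overwrite=True skips the os.path.exists() guard.
demjson.encode_to_file(path, data, overwrite=True)

# Read it back; with encoding=None, decode_file lets decode() auto-detect the encoding.
assert demjson.decode_file(path) == data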
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_dummy_context_manager.__exit__
|
(self, exc_type, exc_val, exc_tb)
|
return False
| 52 | 53 |
def __exit__(self, exc_type, exc_val, exc_tb):
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L52-L53
| 25 |
[
0,
1
] | 100 |
[] | 0 | true | 14.825334 | 2 | 1 | 100 | 0 |
def __exit__(self, exc_type, exc_val, exc_tb):
return False
| 18,370 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_undefined_class.__repr__
|
(self)
|
return self.__module__ + '.undefined'
| 237 | 238 |
def __repr__(self):
return self.__module__ + '.undefined'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L237-L238
| 25 |
[
0
] | 50 |
[
1
] | 50 | false | 14.825334 | 2 | 1 | 50 | 0 |
def __repr__(self):
return self.__module__ + '.undefined'
| 18,371 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_undefined_class.__str__
|
(self)
|
return 'undefined'
| 240 | 241 |
def __str__(self):
return 'undefined'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L240-L241
| 25 |
[
0
] | 50 |
[
1
] | 50 | false | 14.825334 | 2 | 1 | 50 | 0 |
def __str__(self):
return 'undefined'
| 18,372 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
_undefined_class.__bool__
|
(self)
|
return False
| 243 | 244 |
def __bool__(self):
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L243-L244
| 25 |
[
0
] | 50 |
[
1
] | 50 | false | 14.825334 | 2 | 1 | 50 | 0 |
def __bool__(self):
return False
| 18,373 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
json_int.__new__
|
(cls, *args, **kwargs)
|
return obj
| 647 | 659 |
def __new__(cls, *args, **kwargs):
if 'number_format' in kwargs:
number_format = kwargs['number_format']
del kwargs['number_format']
if number_format not in (
NUMBER_FORMAT_DECIMAL, NUMBER_FORMAT_HEX, NUMBER_FORMAT_OCTAL, NUMBER_FORMAT_LEGACYOCTAL,
NUMBER_FORMAT_BINARY):
raise TypeError("json_int(): Invalid value for number_format argument")
else:
number_format = NUMBER_FORMAT_DECIMAL
obj = super(json_int, cls).__new__(cls, *args, **kwargs)
obj._jsonfmt = number_format
return obj
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L647-L659
| 25 |
[
0
] | 7.692308 |
[
1,
2,
3,
4,
7,
9,
10,
11,
12
] | 69.230769 | false | 14.825334 | 13 | 3 | 30.769231 | 0 |
def __new__(cls, *args, **kwargs):
if 'number_format' in kwargs:
number_format = kwargs['number_format']
del kwargs['number_format']
if number_format not in (
NUMBER_FORMAT_DECIMAL, NUMBER_FORMAT_HEX, NUMBER_FORMAT_OCTAL, NUMBER_FORMAT_LEGACYOCTAL,
NUMBER_FORMAT_BINARY):
raise TypeError("json_int(): Invalid value for number_format argument")
else:
number_format = NUMBER_FORMAT_DECIMAL
obj = super(json_int, cls).__new__(cls, *args, **kwargs)
obj._jsonfmt = number_format
return obj
| 18,374 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
json_int.number_format
|
(self)
|
return self._jsonfmt
|
The original radix format of the number
|
The original radix format of the number
| 662 | 664 |
def number_format(self):
"""The original radix format of the number"""
return self._jsonfmt
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L662-L664
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def number_format(self):
return self._jsonfmt
| 18,375 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
json_int.json_format
|
(self)
|
Returns the integer value formatted as a JSON literal
|
Returns the integer value formatted as a JSON literal
| 666 | 683 |
def json_format(self):
"""Returns the integer value formatted as a JSON literal"""
fmt = self._jsonfmt
if fmt == NUMBER_FORMAT_HEX:
return format(self, '#x')
elif fmt == NUMBER_FORMAT_OCTAL:
return format(self, '#o')
elif fmt == NUMBER_FORMAT_BINARY:
return format(self, '#b')
elif fmt == NUMBER_FORMAT_LEGACYOCTAL:
if self == 0:
return '0' # For some reason Python's int doesn't do '00'
elif self < 0:
return '-0%o' % (-self)
else:
return '0%o' % self
else:
return str(self)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L666-L683
| 25 |
[
0,
1
] | 11.111111 |
[
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
15,
17
] | 77.777778 | false | 14.825334 | 18 | 7 | 22.222222 | 1 |
def json_format(self):
fmt = self._jsonfmt
if fmt == NUMBER_FORMAT_HEX:
return format(self, '#x')
elif fmt == NUMBER_FORMAT_OCTAL:
return format(self, '#o')
elif fmt == NUMBER_FORMAT_BINARY:
return format(self, '#b')
elif fmt == NUMBER_FORMAT_LEGACYOCTAL:
if self == 0:
return '0' # For some reason Python's int doesn't do '00'
elif self < 0:
return '-0%o' % (-self)
else:
return '0%o' % self
else:
return str(self)
| 18,376 |
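A short sketch of how the json_int subclass in the records above preserves its original radix, assuming json_int and the NUMBER_FORMAT_* constants are importable from akshare.utils.demjson (an assumption based on the record's path):
from akshare.utils.demjson import json_int, NUMBER_FORMAT_HEX, NUMBER_FORMAT_BINARY  # assumed import path

# json_int behaves like a plain int but remembers the radix it was written in.
n = json_int(255, number_format=NUMBER_FORMAT_HEX)
assert n == 255                    # ordinary integer semantics
assert n.json_format() == '0xff'   # re-serialized in its original radix

assert json_int(5, number_format=NUMBER_FORMAT_BINARY).json_format() == '0b101'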
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.lookup
|
(name)
|
return ci
|
A standard Python codec lookup function for UCS4/UTF32.
 If it recognizes an encoding name it returns a CodecInfo
 structure which contains the various encoder and decoder
functions to use.
|
A standard Python codec lookup function for UCS4/UTF32.
| 765 | 781 |
def lookup(name):
"""A standard Python codec lookup function for UCS4/UTF32.
    If it recognizes an encoding name it returns a CodecInfo
    structure which contains the various encoder and decoder
functions to use.
"""
ci = None
name = name.upper()
if name in ('UCS4BE', 'UCS-4BE', 'UCS-4-BE', 'UTF32BE', 'UTF-32BE', 'UTF-32-BE'):
ci = codecs.CodecInfo(utf32.utf32be_encode, utf32.utf32be_decode, name='utf-32be')
elif name in ('UCS4LE', 'UCS-4LE', 'UCS-4-LE', 'UTF32LE', 'UTF-32LE', 'UTF-32-LE'):
ci = codecs.CodecInfo(utf32.utf32le_encode, utf32.utf32le_decode, name='utf-32le')
elif name in ('UCS4', 'UCS-4', 'UTF32', 'UTF-32'):
ci = codecs.CodecInfo(utf32.encode, utf32.decode, name='utf-32')
return ci
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L765-L781
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 47.058824 |
[
8,
9,
10,
11,
12,
13,
14,
15,
16
] | 52.941176 | false | 14.825334 | 17 | 4 | 47.058824 | 5 |
def lookup(name):
ci = None
name = name.upper()
if name in ('UCS4BE', 'UCS-4BE', 'UCS-4-BE', 'UTF32BE', 'UTF-32BE', 'UTF-32-BE'):
ci = codecs.CodecInfo(utf32.utf32be_encode, utf32.utf32be_decode, name='utf-32be')
elif name in ('UCS4LE', 'UCS-4LE', 'UCS-4-LE', 'UTF32LE', 'UTF-32LE', 'UTF-32-LE'):
ci = codecs.CodecInfo(utf32.utf32le_encode, utf32.utf32le_decode, name='utf-32le')
elif name in ('UCS4', 'UCS-4', 'UTF32', 'UTF-32'):
ci = codecs.CodecInfo(utf32.encode, utf32.decode, name='utf-32')
return ci
| 18,377 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.encode
|
(obj, errors='strict', endianness=None, include_bom=True)
|
return (tobytes(), num_chars)
|
Encodes a Unicode string into a UTF-32 encoded byte string.
Returns a tuple: (bytearray, num_chars)
The errors argument should be one of 'strict', 'ignore', or 'replace'.
The endianness should be one of:
* 'B', '>', or 'big' -- Big endian
 * 'L', '<', or 'little' -- Little endian
* None -- Default, from sys.byteorder
If include_bom is true a Byte-Order Mark will be written to
the beginning of the string, otherwise it will be omitted.
|
Encodes a Unicode string into a UTF-32 encoded byte string.
| 784 | 853 |
def encode(obj, errors='strict', endianness=None, include_bom=True):
"""Encodes a Unicode string into a UTF-32 encoded byte string.
Returns a tuple: (bytearray, num_chars)
The errors argument should be one of 'strict', 'ignore', or 'replace'.
The endianness should be one of:
* 'B', '>', or 'big' -- Big endian
    * 'L', '<', or 'little' -- Little endian
* None -- Default, from sys.byteorder
If include_bom is true a Byte-Order Mark will be written to
the beginning of the string, otherwise it will be omitted.
"""
import sys, struct
# Make a container that can store bytes
if _py_major >= 3:
f = bytearray()
write = f.extend
def tobytes():
return bytes(f)
else:
try:
import io as sio
except ImportError:
import io as sio
f = sio.StringIO()
write = f.write
tobytes = f.getvalue
if not endianness:
endianness = sys.byteorder
if endianness.upper()[0] in ('B>'):
big_endian = True
elif endianness.upper()[0] in ('L<'):
big_endian = False
else:
raise ValueError("Invalid endianness %r: expected 'big', 'little', or None" % endianness)
pack = struct.pack
packspec = '>L' if big_endian else '<L'
num_chars = 0
if include_bom:
if big_endian:
write(utf32.BOM_UTF32_BE)
else:
write(utf32.BOM_UTF32_LE)
num_chars += 1
for pos, c in enumerate(obj):
n = ord(c)
if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
if errors == 'ignore':
pass
elif errors == 'replace':
n = 0xFFFD
else:
raise UnicodeEncodeError('utf32', obj, pos, pos + 1,
"surrogate code points from U+D800 to U+DFFF are not allowed")
write(pack(packspec, n))
num_chars += 1
return (tobytes(), num_chars)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L784-L853
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15
] | 22.857143 |
[
16,
19,
20,
21,
23,
24,
26,
27,
28,
29,
30,
31,
32,
34,
35,
37,
38,
39,
40,
42,
44,
45,
47,
49,
50,
51,
53,
54,
56,
57,
58,
59,
60,
61,
62,
64,
66,
67,
69
] | 55.714286 | false | 14.825334 | 70 | 13 | 44.285714 | 13 |
def encode(obj, errors='strict', endianness=None, include_bom=True):
import sys, struct
# Make a container that can store bytes
if _py_major >= 3:
f = bytearray()
write = f.extend
def tobytes():
return bytes(f)
else:
try:
import io as sio
except ImportError:
import io as sio
f = sio.StringIO()
write = f.write
tobytes = f.getvalue
if not endianness:
endianness = sys.byteorder
if endianness.upper()[0] in ('B>'):
big_endian = True
elif endianness.upper()[0] in ('L<'):
big_endian = False
else:
raise ValueError("Invalid endianness %r: expected 'big', 'little', or None" % endianness)
pack = struct.pack
packspec = '>L' if big_endian else '<L'
num_chars = 0
if include_bom:
if big_endian:
write(utf32.BOM_UTF32_BE)
else:
write(utf32.BOM_UTF32_LE)
num_chars += 1
for pos, c in enumerate(obj):
n = ord(c)
if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
if errors == 'ignore':
pass
elif errors == 'replace':
n = 0xFFFD
else:
raise UnicodeEncodeError('utf32', obj, pos, pos + 1,
"surrogate code points from U+D800 to U+DFFF are not allowed")
write(pack(packspec, n))
num_chars += 1
return (tobytes(), num_chars)
| 18,378 |
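A small usage sketch for the UTF-32 encoder above. It assumes utf32 is a class exported by akshare.utils.demjson (the records call it as utf32.encode) and that its BOM_UTF32_BE constant is the standard 00 00 FE FF byte-order mark.
from akshare.utils.demjson import utf32  # assumed import path

raw, nchars = utf32.encode('AB', endianness='B', include_bom=True)

# One BOM plus two characters, each packed into 4 big-endian bytes.
assert nchars == 3
assert len(raw) == 12
assert raw[:4] == b'\x00\x00\xfe\xff'    # standard UTF-32BE BOM (assumed constant value)
assert raw[4:8] == b'\x00\x00\x00A'      # U+0041 packed as '>L'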
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.utf32le_encode
|
(obj, errors='strict', include_bom=False)
|
return utf32.encode(obj, errors=errors, endianness='L', include_bom=include_bom)
|
Encodes a Unicode string into a UTF-32LE (little endian) encoded byte string.
|
Encodes a Unicode string into a UTF-32LE (little endian) encoded byte string.
| 856 | 858 |
def utf32le_encode(obj, errors='strict', include_bom=False):
"""Encodes a Unicode string into a UTF-32LE (little endian) encoded byte string."""
return utf32.encode(obj, errors=errors, endianness='L', include_bom=include_bom)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L856-L858
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def utf32le_encode(obj, errors='strict', include_bom=False):
return utf32.encode(obj, errors=errors, endianness='L', include_bom=include_bom)
| 18,379 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.utf32be_encode
|
(obj, errors='strict', include_bom=False)
|
return utf32.encode(obj, errors=errors, endianness='B', include_bom=include_bom)
|
Encodes a Unicode string into a UTF-32BE (big endian) encoded byte string.
|
Encodes a Unicode string into a UTF-32BE (big endian) encoded byte string.
| 861 | 863 |
def utf32be_encode(obj, errors='strict', include_bom=False):
"""Encodes a Unicode string into a UTF-32BE (big endian) encoded byte string."""
return utf32.encode(obj, errors=errors, endianness='B', include_bom=include_bom)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L861-L863
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def utf32be_encode(obj, errors='strict', include_bom=False):
return utf32.encode(obj, errors=errors, endianness='B', include_bom=include_bom)
| 18,380 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.decode
|
(obj, errors='strict', endianness=None)
|
return (''.join(chars), num_bytes)
|
Decodes a UTF-32 byte string into a Unicode string.
Returns tuple (bytearray, num_bytes)
 The errors argument should be one of 'strict', 'ignore',
'replace', 'backslashreplace', or 'xmlcharrefreplace'.
The endianness should either be None (for auto-guessing), or a
word that starts with 'B' (big) or 'L' (little).
Will detect a Byte-Order Mark. If a BOM is found and endianness
is also set, then the two must match.
If neither a BOM is found nor endianness is set, then big
endian order is assumed.
|
Decodes a UTF-32 byte string into a Unicode string.
| 866 | 945 |
def decode(obj, errors='strict', endianness=None):
"""Decodes a UTF-32 byte string into a Unicode string.
Returns tuple (bytearray, num_bytes)
    The errors argument should be one of 'strict', 'ignore',
'replace', 'backslashreplace', or 'xmlcharrefreplace'.
The endianness should either be None (for auto-guessing), or a
word that starts with 'B' (big) or 'L' (little).
Will detect a Byte-Order Mark. If a BOM is found and endianness
is also set, then the two must match.
If neither a BOM is found nor endianness is set, then big
endian order is assumed.
"""
import struct, sys
maxunicode = sys.maxunicode
unpack = struct.unpack
# Detect BOM
if obj.startswith(utf32.BOM_UTF32_BE):
bom_endianness = 'B'
start = len(utf32.BOM_UTF32_BE)
elif obj.startswith(utf32.BOM_UTF32_LE):
bom_endianness = 'L'
start = len(utf32.BOM_UTF32_LE)
else:
bom_endianness = None
start = 0
num_bytes = start
if endianness == None:
if bom_endianness == None:
endianness = sys.byteorder.upper()[0] # Assume platform default
else:
endianness = bom_endianness
else:
endianness = endianness[0].upper()
if bom_endianness and endianness != bom_endianness:
raise UnicodeDecodeError('utf32', obj, 0, start, 'BOM does not match expected byte order')
# Check for truncated last character
if ((len(obj) - start) % 4) != 0:
raise UnicodeDecodeError('utf32', obj, start, len(obj),
'Data length not a multiple of 4 bytes')
# Start decoding characters
chars = []
packspec = '>L' if endianness == 'B' else '<L'
i = 0
for i in range(start, len(obj), 4):
seq = obj[i:i + 4]
n = unpack(packspec, seq)[0]
num_bytes += 4
if n > maxunicode or (0xD800 <= n <= 0xDFFF):
if errors == 'strict':
raise UnicodeDecodeError('utf32', obj, i, i + 4, 'Invalid code point U+%04X' % n)
elif errors == 'replace':
chars.append(chr(0xFFFD))
elif errors == 'backslashreplace':
if n > 0xffff:
esc = "\\u%04x" % (n,)
else:
esc = "\\U%08x" % (n,)
for esc_c in esc:
chars.append(esc_c)
elif errors == 'xmlcharrefreplace':
esc = "&#%d;" % (n,)
for esc_c in esc:
chars.append(esc_c)
else: # ignore
pass
else:
chars.append(helpers.safe_unichr(n))
return (''.join(chars), num_bytes)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L866-L945
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 22.5 |
[
18,
19,
20,
23,
24,
25,
26,
27,
28,
30,
31,
33,
35,
36,
37,
39,
41,
42,
43,
46,
47,
51,
52,
53,
54,
55,
56,
57,
59,
60,
61,
62,
63,
64,
65,
66,
68,
69,
70,
71,
72,
73,
74,
76,
78,
79
] | 57.5 | false | 14.825334 | 80 | 18 | 42.5 | 15 |
def decode(obj, errors='strict', endianness=None):
import struct, sys
maxunicode = sys.maxunicode
unpack = struct.unpack
# Detect BOM
if obj.startswith(utf32.BOM_UTF32_BE):
bom_endianness = 'B'
start = len(utf32.BOM_UTF32_BE)
elif obj.startswith(utf32.BOM_UTF32_LE):
bom_endianness = 'L'
start = len(utf32.BOM_UTF32_LE)
else:
bom_endianness = None
start = 0
num_bytes = start
if endianness == None:
if bom_endianness == None:
endianness = sys.byteorder.upper()[0] # Assume platform default
else:
endianness = bom_endianness
else:
endianness = endianness[0].upper()
if bom_endianness and endianness != bom_endianness:
raise UnicodeDecodeError('utf32', obj, 0, start, 'BOM does not match expected byte order')
# Check for truncated last character
if ((len(obj) - start) % 4) != 0:
raise UnicodeDecodeError('utf32', obj, start, len(obj),
'Data length not a multiple of 4 bytes')
# Start decoding characters
chars = []
packspec = '>L' if endianness == 'B' else '<L'
i = 0
for i in range(start, len(obj), 4):
seq = obj[i:i + 4]
n = unpack(packspec, seq)[0]
num_bytes += 4
if n > maxunicode or (0xD800 <= n <= 0xDFFF):
if errors == 'strict':
raise UnicodeDecodeError('utf32', obj, i, i + 4, 'Invalid code point U+%04X' % n)
elif errors == 'replace':
chars.append(chr(0xFFFD))
elif errors == 'backslashreplace':
if n > 0xffff:
esc = "\\u%04x" % (n,)
else:
esc = "\\U%08x" % (n,)
for esc_c in esc:
chars.append(esc_c)
elif errors == 'xmlcharrefreplace':
esc = "&#%d;" % (n,)
for esc_c in esc:
chars.append(esc_c)
else: # ignore
pass
else:
chars.append(helpers.safe_unichr(n))
return (''.join(chars), num_bytes)
| 18,381 |
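A round-trip sketch pairing the decoder above with the encoder from the earlier utf32.encode record, under the same assumed import path:
from akshare.utils.demjson import utf32  # assumed import path

text = 'héllo'
raw, _ = utf32.encode(text, endianness='L', include_bom=True)

# The decoder detects the little-endian BOM, strips it, and reports bytes consumed.
decoded, nbytes = utf32.decode(raw)
assert decoded == text
assert nbytes == len(raw)   # 4 BOM bytes + 5 characters * 4 bytes each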
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.utf32le_decode
|
(obj, errors='strict')
|
return utf32.decode(obj, errors=errors, endianness='L')
|
Decodes a UTF-32LE (little endian) byte string into a Unicode string.
|
Decodes a UTF-32LE (little endian) byte string into a Unicode string.
| 948 | 950 |
def utf32le_decode(obj, errors='strict'):
"""Decodes a UTF-32LE (little endian) byte string into a Unicode string."""
return utf32.decode(obj, errors=errors, endianness='L')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L948-L950
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def utf32le_decode(obj, errors='strict'):
return utf32.decode(obj, errors=errors, endianness='L')
| 18,382 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
utf32.utf32be_decode
|
(obj, errors='strict')
|
return utf32.decode(obj, errors=errors, endianness='B')
|
Decodes a UTF-32BE (big endian) byte string into a Unicode string.
|
Decodes a UTF-32BE (big endian) byte string into a Unicode string.
| 953 | 955 |
def utf32be_decode(obj, errors='strict'):
"""Decodes a UTF-32BE (big endian) byte string into a Unicode string."""
return utf32.decode(obj, errors=errors, endianness='B')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L953-L955
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def utf32be_decode(obj, errors='strict'):
return utf32.decode(obj, errors=errors, endianness='B')
| 18,383 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.make_raw_bytes
|
(byte_list)
|
return _make_raw_bytes(byte_list)
|
Constructs a byte array (bytes in Python 3, str in Python 2) from a list of byte values (0-255).
|
Constructs a byte array (bytes in Python 3, str in Python 2) from a list of byte values (0-255).
| 1,002 | 1,006 |
def make_raw_bytes(byte_list):
"""Constructs a byte array (bytes in Python 3, str in Python 2) from a list of byte values (0-255).
"""
return _make_raw_bytes(byte_list)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1002-L1006
| 25 |
[
0,
1,
2,
3
] | 80 |
[
4
] | 20 | false | 14.825334 | 5 | 1 | 80 | 1 |
def make_raw_bytes(byte_list):
return _make_raw_bytes(byte_list)
| 18,384 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_hex_digit
|
(c)
|
return (c in helpers.hexdigits)
|
Determines if the given character is a valid hexadecimal digit (0-9, a-f, A-F).
|
Determines if the given character is a valid hexadecimal digit (0-9, a-f, A-F).
| 1,009 | 1,011 |
def is_hex_digit(c):
"""Determines if the given character is a valid hexadecimal digit (0-9, a-f, A-F)."""
return (c in helpers.hexdigits)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1009-L1011
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def is_hex_digit(c):
return (c in helpers.hexdigits)
| 18,385 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_octal_digit
|
(c)
|
return (c in helpers.octaldigits)
|
Determines if the given character is a valid octal digit (0-7).
|
Determines if the given character is a valid octal digit (0-7).
| 1,014 | 1,016 |
def is_octal_digit(c):
"""Determines if the given character is a valid octal digit (0-7)."""
return (c in helpers.octaldigits)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1014-L1016
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def is_octal_digit(c):
return (c in helpers.octaldigits)
| 18,386 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_binary_digit
|
(c)
|
return (c == '0' or c == '1')
|
Determines if the given character is a valid binary digit (0 or 1).
|
Determines if the given character is a valid binary digit (0 or 1).
| 1,019 | 1,021 |
def is_binary_digit(c):
"""Determines if the given character is a valid binary digit (0 or 1)."""
return (c == '0' or c == '1')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1019-L1021
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 2 | 66.666667 | 1 |
def is_binary_digit(c):
return (c == '0' or c == '1')
| 18,387 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_json_ws
|
(c)
|
return c in ' \t\n\r'
|
Determines if the given character is a JSON white-space character
|
Determines if the given character is a JSON white-space character
| 1,024 | 1,026 |
def char_is_json_ws(c):
"""Determines if the given character is a JSON white-space character"""
return c in ' \t\n\r'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1024-L1026
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def char_is_json_ws(c):
return c in ' \t\n\r'
| 18,388 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.safe_unichr
|
(codepoint)
|
return c
|
Just like Python's unichr() but works in narrow-Unicode Pythons.
|
Just like Python's unichr() but works in narrow-Unicode Pythons.
| 1,029 | 1,040 |
def safe_unichr(codepoint):
"""Just like Python's unichr() but works in narrow-Unicode Pythons."""
if codepoint >= 0x10000 and codepoint > helpers.maxunicode:
# Narrow-Unicode python, construct a UTF-16 surrogate pair.
w1, w2 = helpers.make_surrogate_pair(codepoint)
if w2 is None:
c = chr(w1)
else:
c = chr(w1) + chr(w2)
else:
c = chr(codepoint)
return c
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1029-L1040
| 25 |
[
0,
1
] | 16.666667 |
[
2,
4,
5,
6,
8,
10,
11
] | 58.333333 | false | 14.825334 | 12 | 4 | 41.666667 | 1 |
def safe_unichr(codepoint):
if codepoint >= 0x10000 and codepoint > helpers.maxunicode:
# Narrow-Unicode python, construct a UTF-16 surrogate pair.
w1, w2 = helpers.make_surrogate_pair(codepoint)
if w2 is None:
c = chr(w1)
else:
c = chr(w1) + chr(w2)
else:
c = chr(codepoint)
return c
| 18,389 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_unicode_ws
|
(c)
|
return unicodedata.category(c) == 'Zs'
|
Determines if the given character is a Unicode space character
|
Determines if the given character is a Unicode space character
| 1,043 | 1,050 |
def char_is_unicode_ws(c):
"""Determines if the given character is a Unicode space character"""
if not isinstance(c, str):
c = str(c)
if c in ' \t\n\r\f\v':
return True
import unicodedata
return unicodedata.category(c) == 'Zs'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1043-L1050
| 25 |
[
0,
1
] | 25 |
[
2,
3,
4,
5,
6,
7
] | 75 | false | 14.825334 | 8 | 3 | 25 | 1 |
def char_is_unicode_ws(c):
if not isinstance(c, str):
c = str(c)
if c in ' \t\n\r\f\v':
return True
import unicodedata
return unicodedata.category(c) == 'Zs'
| 18,390 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_json_eol
|
(c)
|
return c in '\n\r'
|
Determines if the given character is a JSON line separator
|
Determines if the given character is a JSON line separator
| 1,053 | 1,055 |
def char_is_json_eol(c):
"""Determines if the given character is a JSON line separator"""
return c in '\n\r'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1053-L1055
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def char_is_json_eol(c):
return c in '\n\r'
| 18,391 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_unicode_eol
|
(c)
|
return c in '\r\n\u2028\u2029'
|
Determines if the given character is a Unicode line or
paragraph separator. These correspond to CR and LF as well as
Unicode characters in the Zl or Zp categories.
|
Determines if the given character is a Unicode line or
paragraph separator. These correspond to CR and LF as well as
Unicode characters in the Zl or Zp categories.
| 1,058 | 1,064 |
def char_is_unicode_eol(c):
"""Determines if the given character is a Unicode line or
paragraph separator. These correspond to CR and LF as well as
Unicode characters in the Zl or Zp categories.
"""
return c in '\r\n\u2028\u2029'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1058-L1064
| 25 |
[
0,
1,
2,
3,
4,
5
] | 85.714286 |
[
6
] | 14.285714 | false | 14.825334 | 7 | 1 | 85.714286 | 3 |
def char_is_unicode_eol(c):
return c in '\r\n\u2028\u2029'
| 18,392 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_identifier_leader
|
(c)
|
return c.isalpha() or c in '_$'
|
Determines if the character may be the first character of a
JavaScript identifier.
|
Determines if the character may be the first character of a
JavaScript identifier.
| 1,067 | 1,071 |
def char_is_identifier_leader(c):
"""Determines if the character may be the first character of a
JavaScript identifier.
"""
return c.isalpha() or c in '_$'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1067-L1071
| 25 |
[
0,
1,
2,
3
] | 80 |
[
4
] | 20 | false | 14.825334 | 5 | 2 | 80 | 2 |
def char_is_identifier_leader(c):
return c.isalpha() or c in '_$'
| 18,393 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.char_is_identifier_tail
|
(c)
|
return c.isalnum() or c in '_$\u200c\u200d'
|
Determines if the character may be part of a JavaScript
identifier.
|
Determines if the character may be part of a JavaScript
identifier.
| 1,074 | 1,078 |
def char_is_identifier_tail(c):
"""Determines if the character may be part of a JavaScript
identifier.
"""
return c.isalnum() or c in '_$\u200c\u200d'
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1074-L1078
| 25 |
[
0,
1,
2,
3
] | 80 |
[
4
] | 20 | false | 14.825334 | 5 | 2 | 80 | 2 |
def char_is_identifier_tail(c):
return c.isalnum() or c in '_$\u200c\u200d'
| 18,394 |
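A tiny sketch of the two identifier predicates above, assuming helpers is a class of static helpers exported by akshare.utils.demjson (as the helpers.* call style in these records suggests):
from akshare.utils.demjson import helpers  # assumed import path

# A JavaScript identifier may start with a letter, '_' or '$' ...
assert helpers.char_is_identifier_leader('$')
assert not helpers.char_is_identifier_leader('1')

# ... while later positions may also be digits (or ZWNJ/ZWJ).
assert helpers.char_is_identifier_tail('1')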
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.extend_and_flatten_list_with_sep
|
(orig_seq, extension_seq, separator='')
| 1,081 | 1,085 |
def extend_and_flatten_list_with_sep(orig_seq, extension_seq, separator=''):
for i, part in enumerate(extension_seq):
if i > 0 and separator:
orig_seq.append(separator)
orig_seq.extend(part)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1081-L1085
| 25 |
[
0
] | 20 |
[
1,
2,
3,
4
] | 80 | false | 14.825334 | 5 | 4 | 20 | 0 |
def extend_and_flatten_list_with_sep(orig_seq, extension_seq, separator=''):
for i, part in enumerate(extension_seq):
if i > 0 and separator:
orig_seq.append(separator)
orig_seq.extend(part)
| 18,395 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.strip_format_control_chars
|
(txt)
|
return txt2
|
Filters out all Unicode format control characters from the string.
ECMAScript permits any Unicode "format control characters" to
appear at any place in the source code. They are to be
ignored as if they are not there before any other lexical
tokenization occurs. Note that JSON does not allow them,
except within string literals.
* Ref. ECMAScript section 7.1.
* http://en.wikipedia.org/wiki/Unicode_control_characters
There are dozens of Format Control Characters, for example:
U+00AD SOFT HYPHEN
U+200B ZERO WIDTH SPACE
U+2060 WORD JOINER
|
Filters out all Unicode format control characters from the string.
| 1,088 | 1,114 |
def strip_format_control_chars(txt):
"""Filters out all Unicode format control characters from the string.
ECMAScript permits any Unicode "format control characters" to
appear at any place in the source code. They are to be
ignored as if they are not there before any other lexical
tokenization occurs. Note that JSON does not allow them,
except within string literals.
* Ref. ECMAScript section 7.1.
* http://en.wikipedia.org/wiki/Unicode_control_characters
There are dozens of Format Control Characters, for example:
U+00AD SOFT HYPHEN
U+200B ZERO WIDTH SPACE
U+2060 WORD JOINER
"""
import unicodedata
txt2 = [c for c in txt if unicodedata.category(str(c)) != 'Cf']
# 2to3 NOTE: The following is needed to work around a broken
# Python3 conversion in which filter() will be transformed
# into a list rather than a string.
if not isinstance(txt2, str):
txt2 = ''.join(txt2)
return txt2
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1088-L1114
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 66.666667 |
[
18,
19,
24,
25,
26
] | 18.518519 | false | 14.825334 | 27 | 3 | 81.481481 | 15 |
def strip_format_control_chars(txt):
import unicodedata
txt2 = [c for c in txt if unicodedata.category(str(c)) != 'Cf']
# 2to3 NOTE: The following is needed to work around a broken
# Python3 conversion in which filter() will be transformed
# into a list rather than a string.
if not isinstance(txt2, str):
txt2 = ''.join(txt2)
return txt2
| 18,396 |
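A brief sketch of the filter above removing an invisible Unicode format control character (U+200B ZERO WIDTH SPACE, one of the examples listed in the record's docstring); the import path is assumed as before:
from akshare.utils.demjson import helpers  # assumed import path

s = 'foo\u200bbar'   # contains a zero-width space, Unicode category 'Cf'
assert helpers.strip_format_control_chars(s) == 'foobar'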
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.lookup_codec
|
(encoding)
|
return cdk
|
Wrapper around codecs.lookup().
Returns None if codec not found, rather than raising a LookupError.
|
Wrapper around codecs.lookup().
| 1,117 | 1,141 |
def lookup_codec(encoding):
"""Wrapper around codecs.lookup().
Returns None if codec not found, rather than raising a LookupError.
"""
import codecs
if isinstance(encoding, codecs.CodecInfo):
return encoding
encoding = encoding.lower()
import codecs
if helpers.always_use_custom_codecs:
# Try custom utf32 first, then standard python codecs
cdk = utf32.lookup(encoding)
if not cdk:
try:
cdk = codecs.lookup(encoding)
except LookupError:
cdk = None
else:
# Try standard python codecs first, then custom utf32
try:
cdk = codecs.lookup(encoding)
except LookupError:
cdk = utf32.lookup(encoding)
return cdk
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1117-L1141
| 25 |
[
0,
1,
2,
3,
4
] | 20 |
[
5,
6,
7,
8,
9,
10,
12,
13,
14,
15,
16,
17,
20,
21,
22,
23,
24
] | 68 | false | 14.825334 | 25 | 6 | 32 | 3 |
def lookup_codec(encoding):
import codecs
if isinstance(encoding, codecs.CodecInfo):
return encoding
encoding = encoding.lower()
import codecs
if helpers.always_use_custom_codecs:
# Try custom utf32 first, then standard python codecs
cdk = utf32.lookup(encoding)
if not cdk:
try:
cdk = codecs.lookup(encoding)
except LookupError:
cdk = None
else:
# Try standard python codecs first, then custom utf32
try:
cdk = codecs.lookup(encoding)
except LookupError:
cdk = utf32.lookup(encoding)
return cdk
| 18,397 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.auto_detect_encoding
|
(s)
|
return encoding
|
Takes a string (or byte array) and tries to determine the Unicode encoding it is in.
Returns the encoding name, as a string.
|
Takes a string (or byte array) and tries to determine the Unicode encoding it is in.
| 1,144 | 1,223 |
def auto_detect_encoding(s):
"""Takes a string (or byte array) and tries to determine the Unicode encoding it is in.
Returns the encoding name, as a string.
"""
if not s or len(s) == 0:
return "utf-8"
# Get the byte values of up to the first 4 bytes
ords = []
for i in range(0, min(len(s), 4)):
x = s[i]
if isinstance(x, str):
x = ord(x)
ords.append(x)
# Look for BOM marker
import sys, codecs
bom2, bom3, bom4 = None, None, None
if len(s) >= 2:
bom2 = s[:2]
if len(s) >= 3:
bom3 = s[:3]
if len(s) >= 4:
bom4 = s[:4]
# Assign values of first four bytes to: a, b, c, d; and last byte to: z
a, b, c, d, z = None, None, None, None, None
if len(s) >= 1:
a = ords[0]
if len(s) >= 2:
b = ords[1]
if len(s) >= 3:
c = ords[2]
if len(s) >= 4:
d = ords[3]
z = s[-1]
if isinstance(z, str):
z = ord(z)
if bom4 and ((hasattr(codecs, 'BOM_UTF32_LE') and bom4 == codecs.BOM_UTF32_LE) or
(bom4 == utf32.BOM_UTF32_LE)):
encoding = 'utf-32le'
s = s[4:]
elif bom4 and ((hasattr(codecs, 'BOM_UTF32_BE') and bom4 == codecs.BOM_UTF32_BE) or
(bom4 == utf32.BOM_UTF32_BE)):
encoding = 'utf-32be'
s = s[4:]
elif bom2 and bom2 == codecs.BOM_UTF16_LE:
encoding = 'utf-16le'
s = s[2:]
elif bom2 and bom2 == codecs.BOM_UTF16_BE:
encoding = 'utf-16be'
s = s[2:]
elif bom3 and bom3 == codecs.BOM_UTF8:
encoding = 'utf-8'
s = s[3:]
# No BOM, so autodetect encoding used by looking at first four
# bytes according to RFC 4627 section 3. The first and last bytes
# in a JSON document will be ASCII. The second byte will be ASCII
# unless the first byte was a quotation mark.
elif len(s) >= 4 and a == 0 and b == 0 and c == 0 and d != 0: # UTF-32BE (0 0 0 x)
encoding = 'utf-32be'
elif len(s) >= 4 and a != 0 and b == 0 and c == 0 and d == 0 and z == 0: # UTF-32LE (x 0 0 0 [... 0])
encoding = 'utf-32le'
elif len(s) >= 2 and a == 0 and b != 0: # UTF-16BE (0 x)
encoding = 'utf-16be'
elif len(s) >= 2 and a != 0 and b == 0 and z == 0: # UTF-16LE (x 0 [... 0])
encoding = 'utf-16le'
elif ord('\t') <= a <= 127:
# First byte appears to be ASCII, so guess UTF-8.
encoding = 'utf8'
else:
raise ValueError("Can not determine the Unicode encoding for byte stream")
return encoding
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1144-L1223
| 25 |
[
0,
1,
2,
3,
4,
5
] | 7.5 |
[
6,
7,
10,
11,
12,
13,
14,
15,
18,
19,
20,
21,
22,
23,
24,
25,
28,
29,
30,
31,
32,
33,
34,
35,
36,
38,
39,
40,
42,
44,
45,
46,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
65,
66,
67,
68,
69,
70,
71,
72,
73,
75,
77,
79
] | 68.75 | false | 14.825334 | 80 | 46 | 31.25 | 3 |
def auto_detect_encoding(s):
if not s or len(s) == 0:
return "utf-8"
# Get the byte values of up to the first 4 bytes
ords = []
for i in range(0, min(len(s), 4)):
x = s[i]
if isinstance(x, str):
x = ord(x)
ords.append(x)
# Look for BOM marker
import sys, codecs
bom2, bom3, bom4 = None, None, None
if len(s) >= 2:
bom2 = s[:2]
if len(s) >= 3:
bom3 = s[:3]
if len(s) >= 4:
bom4 = s[:4]
# Assign values of first four bytes to: a, b, c, d; and last byte to: z
a, b, c, d, z = None, None, None, None, None
if len(s) >= 1:
a = ords[0]
if len(s) >= 2:
b = ords[1]
if len(s) >= 3:
c = ords[2]
if len(s) >= 4:
d = ords[3]
z = s[-1]
if isinstance(z, str):
z = ord(z)
if bom4 and ((hasattr(codecs, 'BOM_UTF32_LE') and bom4 == codecs.BOM_UTF32_LE) or
(bom4 == utf32.BOM_UTF32_LE)):
encoding = 'utf-32le'
s = s[4:]
elif bom4 and ((hasattr(codecs, 'BOM_UTF32_BE') and bom4 == codecs.BOM_UTF32_BE) or
(bom4 == utf32.BOM_UTF32_BE)):
encoding = 'utf-32be'
s = s[4:]
elif bom2 and bom2 == codecs.BOM_UTF16_LE:
encoding = 'utf-16le'
s = s[2:]
elif bom2 and bom2 == codecs.BOM_UTF16_BE:
encoding = 'utf-16be'
s = s[2:]
elif bom3 and bom3 == codecs.BOM_UTF8:
encoding = 'utf-8'
s = s[3:]
# No BOM, so autodetect encoding used by looking at first four
# bytes according to RFC 4627 section 3. The first and last bytes
# in a JSON document will be ASCII. The second byte will be ASCII
# unless the first byte was a quotation mark.
elif len(s) >= 4 and a == 0 and b == 0 and c == 0 and d != 0: # UTF-32BE (0 0 0 x)
encoding = 'utf-32be'
elif len(s) >= 4 and a != 0 and b == 0 and c == 0 and d == 0 and z == 0: # UTF-32LE (x 0 0 0 [... 0])
encoding = 'utf-32le'
elif len(s) >= 2 and a == 0 and b != 0: # UTF-16BE (0 x)
encoding = 'utf-16be'
elif len(s) >= 2 and a != 0 and b == 0 and z == 0: # UTF-16LE (x 0 [... 0])
encoding = 'utf-16le'
elif ord('\t') <= a <= 127:
# First byte appears to be ASCII, so guess UTF-8.
encoding = 'utf8'
else:
raise ValueError("Can not determine the Unicode encoding for byte stream")
return encoding
| 18,398 |
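A sketch of the RFC 4627-style byte-pattern detection implemented above, again assuming helpers.auto_detect_encoding is callable as a static method from akshare.utils.demjson; note the no-BOM ASCII branch returns 'utf8' without a hyphen, exactly as in the code:
from akshare.utils.demjson import helpers  # assumed import path

# '{}' in UTF-16LE: an ASCII byte followed by a zero byte, ending in zero.
assert helpers.auto_detect_encoding(b'{\x00}\x00') == 'utf-16le'

# A leading UTF-8 BOM takes precedence over the byte-pattern heuristics.
assert helpers.auto_detect_encoding(b'\xef\xbb\xbf{}') == 'utf-8'

# Plain ASCII JSON with no BOM falls through to the UTF-8 guess.
assert helpers.auto_detect_encoding(b'{"a": 1}') == 'utf8'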
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.unicode_decode
|
(txt, encoding=None)
|
return res
|
Takes a string (or byte array) and tries to convert it to a Unicode string.
Returns a named tuple: (string, codec, bom)
 The 'encoding' argument, if supplied, should be either the name of
a character encoding, or an instance of codecs.CodecInfo. If
the encoding argument is None or "auto" then the encoding is
automatically determined, if possible.
Any BOM (Byte Order Mark) that is found at the beginning of the
input will be stripped off and placed in the 'bom' portion of
the returned value.
|
Takes a string (or byte array) and tries to convert it to a Unicode string.
| 1,226 | 1,271 |
def unicode_decode(txt, encoding=None):
"""Takes a string (or byte array) and tries to convert it to a Unicode string.
Returns a named tuple: (string, codec, bom)
    The 'encoding' argument, if supplied, should be either the name of
a character encoding, or an instance of codecs.CodecInfo. If
the encoding argument is None or "auto" then the encoding is
automatically determined, if possible.
Any BOM (Byte Order Mark) that is found at the beginning of the
input will be stripped off and placed in the 'bom' portion of
the returned value.
"""
if isinstance(txt, str):
res = _namedtuple('DecodedString', ['string', 'codec', 'bom'])(txt, None, None)
else:
if encoding is None or encoding == 'auto':
encoding = helpers.auto_detect_encoding(txt)
cdk = helpers.lookup_codec(encoding)
if not cdk:
raise LookupError("Can not find codec for encoding %r" % encoding)
try:
# Determine if codec takes arguments; try a decode of nothing
cdk.decode(helpers.make_raw_bytes([]), errors='strict')
except TypeError:
            cdk_kw = {} # This codec doesn't like the errors argument
else:
cdk_kw = {'errors': 'strict'}
unitxt, numbytes = cdk.decode(txt, **cdk_kw) # DO THE DECODE HERE!
# Remove BOM if present
if len(unitxt) > 0 and unitxt[0] == '\uFEFF':
bom = cdk.encode(unitxt[0])[0]
unitxt = unitxt[1:]
elif len(unitxt) > 0 and unitxt[0] == '\uFFFE': # Reversed BOM
raise UnicodeDecodeError(cdk.name, txt, 0, 0, "Wrong byte order, found reversed BOM U+FFFE")
else:
bom = None
res = _namedtuple('DecodedString', ['string', 'codec', 'bom'])(unitxt, cdk, bom)
return res
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1226-L1271
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 32.608696 |
[
15,
16,
18,
19,
21,
22,
23,
25,
27,
28,
29,
31,
33,
36,
37,
38,
39,
40,
42,
44,
45
] | 45.652174 | false | 14.825334 | 46 | 10 | 54.347826 | 12 |
def unicode_decode(txt, encoding=None):
if isinstance(txt, str):
res = _namedtuple('DecodedString', ['string', 'codec', 'bom'])(txt, None, None)
else:
if encoding is None or encoding == 'auto':
encoding = helpers.auto_detect_encoding(txt)
cdk = helpers.lookup_codec(encoding)
if not cdk:
raise LookupError("Can not find codec for encoding %r" % encoding)
try:
# Determine if codec takes arguments; try a decode of nothing
cdk.decode(helpers.make_raw_bytes([]), errors='strict')
except TypeError:
            cdk_kw = {} # This codec doesn't like the errors argument
else:
cdk_kw = {'errors': 'strict'}
unitxt, numbytes = cdk.decode(txt, **cdk_kw) # DO THE DECODE HERE!
# Remove BOM if present
if len(unitxt) > 0 and unitxt[0] == '\uFEFF':
bom = cdk.encode(unitxt[0])[0]
unitxt = unitxt[1:]
elif len(unitxt) > 0 and unitxt[0] == '\uFFFE': # Reversed BOM
raise UnicodeDecodeError(cdk.name, txt, 0, 0, "Wrong byte order, found reversed BOM U+FFFE")
else:
bom = None
res = _namedtuple('DecodedString', ['string', 'codec', 'bom'])(unitxt, cdk, bom)
return res
| 18,399 |
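A sketch of the decode helper above returning its (string, codec, bom) named tuple, with the same assumed import path; the input is UTF-16LE with a leading BOM:
from akshare.utils.demjson import helpers  # assumed import path

raw = '\ufeff{"a": 1}'.encode('utf-16-le')   # BOM + ASCII payload

res = helpers.unicode_decode(raw)
assert res.string == '{"a": 1}'      # the BOM is stripped from the text...
assert res.bom == b'\xff\xfe'        # ...and returned separately
assert 'utf-16' in res.codec.name    # the codec chosen by auto-detection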
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.surrogate_pair_as_unicode
|
(c1, c2)
|
return helpers.safe_unichr(v)
|
Takes a pair of unicode surrogates and returns the equivalent unicode character.
The input pair must be a surrogate pair, with c1 in the range
U+D800 to U+DBFF and c2 in the range U+DC00 to U+DFFF.
|
Takes a pair of unicode surrogates and returns the equivalent unicode character.
| 1,274 | 1,288 |
def surrogate_pair_as_unicode(c1, c2):
"""Takes a pair of unicode surrogates and returns the equivalent unicode character.
The input pair must be a surrogate pair, with c1 in the range
U+D800 to U+DBFF and c2 in the range U+DC00 to U+DFFF.
"""
n1, n2 = ord(c1), ord(c2)
if n1 < 0xD800 or n1 > 0xDBFF or n2 < 0xDC00 or n2 > 0xDFFF:
raise JSONDecodeError('illegal Unicode surrogate pair', (c1, c2))
a = n1 - 0xD800
b = n2 - 0xDC00
v = (a << 10) | b
v += 0x10000
return helpers.safe_unichr(v)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1274-L1288
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 46.666667 |
[
7,
8,
9,
10,
11,
12,
13,
14
] | 53.333333 | false | 14.825334 | 15 | 5 | 46.666667 | 4 |
def surrogate_pair_as_unicode(c1, c2):
n1, n2 = ord(c1), ord(c2)
if n1 < 0xD800 or n1 > 0xDBFF or n2 < 0xDC00 or n2 > 0xDFFF:
raise JSONDecodeError('illegal Unicode surrogate pair', (c1, c2))
a = n1 - 0xD800
b = n2 - 0xDC00
v = (a << 10) | b
v += 0x10000
return helpers.safe_unichr(v)
| 18,400 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.unicode_as_surrogate_pair
|
(c)
|
Takes a single unicode character and returns a sequence of surrogate pairs.
The output of this function is a tuple consisting of one or two unicode
characters, such that if the input character is outside the BMP range
then the output is a two-character surrogate pair representing that character.
If the input character is inside the BMP then the output tuple will have
just a single character...the same one.
|
Takes a single unicode character and returns a sequence of surrogate pairs.
| 1,291 | 1,307 |
def unicode_as_surrogate_pair(c):
"""Takes a single unicode character and returns a sequence of surrogate pairs.
The output of this function is a tuple consisting of one or two unicode
characters, such that if the input character is outside the BMP range
then the output is a two-character surrogate pair representing that character.
If the input character is inside the BMP then the output tuple will have
just a single character...the same one.
"""
n = ord(c)
w1, w2 = helpers.make_surrogate_pair(n)
if w2 is None:
return (chr(w1),)
else:
return (chr(w1), chr(w2))
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1291-L1307
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 64.705882 |
[
11,
12,
13,
14,
16
] | 29.411765 | false | 14.825334 | 17 | 2 | 70.588235 | 8 |
def unicode_as_surrogate_pair(c):
n = ord(c)
w1, w2 = helpers.make_surrogate_pair(n)
if w2 is None:
return (chr(w1),)
else:
return (chr(w1), chr(w2))
| 18,401 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.make_surrogate_pair
|
(codepoint)
|
return (w1, w2)
|
Given a Unicode codepoint (int) returns a 2-tuple of surrogate codepoints.
|
Given a Unicode codepoint (int) returns a 2-tuple of surrogate codepoints.
| 1,310 | 1,319 |
def make_surrogate_pair(codepoint):
"""Given a Unicode codepoint (int) returns a 2-tuple of surrogate codepoints."""
if codepoint < 0x10000:
return (codepoint, None) # in BMP, surrogate pair not required
v = codepoint - 0x10000
vh = (v >> 10) & 0x3ff # highest 10 bits
vl = v & 0x3ff # lowest 10 bits
w1 = 0xD800 | vh
w2 = 0xDC00 | vl
return (w1, w2)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1310-L1319
| 25 |
[
0,
1
] | 20 |
[
2,
3,
4,
5,
6,
7,
8,
9
] | 80 | false | 14.825334 | 10 | 2 | 20 | 1 |
def make_surrogate_pair(codepoint):
if codepoint < 0x10000:
return (codepoint, None) # in BMP, surrogate pair not required
v = codepoint - 0x10000
vh = (v >> 10) & 0x3ff # highest 10 bits
vl = v & 0x3ff # lowest 10 bits
w1 = 0xD800 | vh
w2 = 0xDC00 | vl
return (w1, w2)
| 18,402 |
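A worked example of the surrogate-pair arithmetic above for U+1F600 (the grinning-face emoji), together with the inverse helper from the surrogate_pair_as_unicode record; the import path is assumed as before:
from akshare.utils.demjson import helpers  # assumed import path

# v  = 0x1F600 - 0x10000 = 0xF600
# vh = 0xF600 >> 10      = 0x3D   ->  w1 = 0xD800 | 0x3D  = 0xD83D
# vl = 0xF600 & 0x3FF    = 0x200  ->  w2 = 0xDC00 | 0x200 = 0xDE00
assert helpers.make_surrogate_pair(0x1F600) == (0xD83D, 0xDE00)

# Codepoints inside the BMP need no pair at all.
assert helpers.make_surrogate_pair(0x20AC) == (0x20AC, None)

# The inverse direction recombines the two surrogates into one character.
assert helpers.surrogate_pair_as_unicode('\ud83d', '\ude00') == '\U0001F600'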
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.isnumbertype
|
(obj)
|
return isinstance(obj, (int, float)) \
and not isinstance(obj, bool) \
or obj is nan or obj is inf or obj is neginf \
or (decimal and isinstance(obj, decimal.Decimal))
|
Is the object of a Python number type (excluding complex)?
|
Is the object of a Python number type (excluding complex)?
| 1,322 | 1,327 |
def isnumbertype(obj):
"""Is the object of a Python number type (excluding complex)?"""
return isinstance(obj, (int, float)) \
and not isinstance(obj, bool) \
or obj is nan or obj is inf or obj is neginf \
or (decimal and isinstance(obj, decimal.Decimal))
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1322-L1327
| 25 |
[
0,
1
] | 33.333333 |
[
2
] | 16.666667 | false | 14.825334 | 6 | 7 | 83.333333 | 1 |
def isnumbertype(obj):
return isinstance(obj, (int, float)) \
and not isinstance(obj, bool) \
or obj is nan or obj is inf or obj is neginf \
or (decimal and isinstance(obj, decimal.Decimal))
| 18,403 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_negzero
|
(n)
|
Is the number value a negative zero?
|
Is the number value a negative zero?
| 1,330 | 1,337 |
def is_negzero(n):
"""Is the number value a negative zero?"""
if isinstance(n, float):
return n == 0.0 and repr(n).startswith('-')
elif decimal and isinstance(n, decimal.Decimal):
return n.is_zero() and n.is_signed()
else:
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1330-L1337
| 25 |
[
0,
1
] | 25 |
[
2,
3,
4,
5,
7
] | 62.5 | false | 14.825334 | 8 | 6 | 37.5 | 1 |
def is_negzero(n):
if isinstance(n, float):
return n == 0.0 and repr(n).startswith('-')
elif decimal and isinstance(n, decimal.Decimal):
return n.is_zero() and n.is_signed()
else:
return False
| 18,404 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_nan
|
(n)
|
Is the number a NaN (not-a-number)?
|
Is the number a NaN (not-a-number)?
| 1,340 | 1,347 |
def is_nan(n):
"""Is the number a NaN (not-a-number)?"""
if isinstance(n, float):
return n is nan or n.hex() == 'nan' or n != n
elif decimal and isinstance(n, decimal.Decimal):
return n.is_nan()
else:
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1340-L1347
| 25 |
[
0,
1
] | 25 |
[
2,
3,
4,
5,
7
] | 62.5 | false | 14.825334 | 8 | 6 | 37.5 | 1 |
def is_nan(n):
if isinstance(n, float):
return n is nan or n.hex() == 'nan' or n != n
elif decimal and isinstance(n, decimal.Decimal):
return n.is_nan()
else:
return False
| 18,405 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.is_infinite
|
(n)
|
Is the number infinite?
|
Is the number infinite?
| 1,350 | 1,357 |
def is_infinite(n):
"""Is the number infinite?"""
if isinstance(n, float):
return n is inf or n is neginf or n.hex() in ('inf', '-inf')
elif decimal and isinstance(n, decimal.Decimal):
return n.is_infinite()
else:
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1350-L1357
| 25 |
[
0,
1
] | 25 |
[
2,
3,
4,
5,
7
] | 62.5 | false | 14.825334 | 8 | 6 | 37.5 | 1 |
def is_infinite(n):
if isinstance(n, float):
return n is inf or n is neginf or n.hex() in ('inf', '-inf')
elif decimal and isinstance(n, decimal.Decimal):
return n.is_infinite()
else:
return False
| 18,406 |
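A quick sketch of the three special-value predicates from the records above (they also accept decimal.Decimal, per the code); import path assumed as before:
from akshare.utils.demjson import helpers  # assumed import path

assert helpers.is_nan(float('nan'))          # NaN never equals itself
assert helpers.is_infinite(float('-inf'))
assert helpers.is_negzero(-0.0)              # repr(-0.0) starts with '-'
assert not helpers.is_negzero(0.0)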
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.isstringtype
|
(obj)
|
return isinstance(obj, (str,)) \
or isinstance(obj, collections.UserString)
|
Is the object of a Python string type?
|
Is the object of a Python string type?
| 1,360 | 1,367 |
def isstringtype(obj):
"""Is the object of a Python string type?"""
if isinstance(obj, str):
return True
# Must also check for some other pseudo-string types
import types, collections
return isinstance(obj, (str,)) \
or isinstance(obj, collections.UserString)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1360-L1367
| 25 |
[
0,
1
] | 25 |
[
2,
3,
5,
6
] | 50 | false | 14.825334 | 8 | 3 | 50 | 1 |
def isstringtype(obj):
if isinstance(obj, str):
return True
# Must also check for some other pseudo-string types
import types, collections
return isinstance(obj, (str,)) \
or isinstance(obj, collections.UserString)
| 18,407 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.decode_hex
|
(hexstring)
|
return n
|
 Decodes a hexadecimal string into its integer value.
|
 Decodes a hexadecimal string into its integer value.
| 1,371 | 1,388 |
def decode_hex(hexstring):
"""Decodes a hexadecimal string into it's integer value."""
# We don't use the builtin 'hex' codec in python since it can
# not handle odd numbers of digits, nor raise the same type
# of exceptions we want to.
n = 0
for c in hexstring:
if '0' <= c <= '9':
d = ord(c) - ord('0')
elif 'a' <= c <= 'f':
d = ord(c) - ord('a') + 10
elif 'A' <= c <= 'F':
d = ord(c) - ord('A') + 10
else:
raise ValueError('Not a hexadecimal number', hexstring)
# Could use ((n << 4 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 16) + d
return n
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1371-L1388
| 25 |
[
0,
1,
2,
3,
4
] | 27.777778 |
[
5,
6,
7,
8,
9,
10,
11,
12,
14,
16,
17
] | 61.111111 | false | 14.825334 | 18 | 5 | 38.888889 | 1 |
def decode_hex(hexstring):
# We don't use the builtin 'hex' codec in python since it can
# not handle odd numbers of digits, nor raise the same type
# of exceptions we want to.
n = 0
for c in hexstring:
if '0' <= c <= '9':
d = ord(c) - ord('0')
elif 'a' <= c <= 'f':
d = ord(c) - ord('a') + 10
elif 'A' <= c <= 'F':
d = ord(c) - ord('A') + 10
else:
raise ValueError('Not a hexadecimal number', hexstring)
# Could use ((n << 4 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 16) + d
return n
| 18,408 |
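A worked run of the digit loop above; 'Ff' mixes upper and lower case to exercise both branches (same assumed import):
from akshare.utils.demjson import helpers  # assumed import path

# 'F' -> 15, then n = 15 * 16 + 15 = 255 for the trailing 'f'.
assert helpers.decode_hex('Ff') == 255
assert helpers.decode_hex('1f') == 31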
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.decode_octal
|
(octalstring)
|
return n
|
 Decodes an octal string into its integer value.
|
 Decodes an octal string into its integer value.
| 1,391 | 1,401 |
def decode_octal(octalstring):
"""Decodes an octal string into it's integer value."""
n = 0
for c in octalstring:
if '0' <= c <= '7':
d = ord(c) - ord('0')
else:
raise ValueError('Not an octal number', octalstring)
# Could use ((n << 3 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 8) + d
return n
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1391-L1401
| 25 |
[
0,
1
] | 18.181818 |
[
2,
3,
4,
5,
7,
9,
10
] | 63.636364 | false | 14.825334 | 11 | 3 | 36.363636 | 1 |
def decode_octal(octalstring):
n = 0
for c in octalstring:
if '0' <= c <= '7':
d = ord(c) - ord('0')
else:
raise ValueError('Not an octal number', octalstring)
# Could use ((n << 3 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 8) + d
return n
| 18,409 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.decode_binary
|
(binarystring)
|
return n
|
 Decodes a binary string into its integer value.
|
 Decodes a binary string into its integer value.
| 1,404 | 1,416 |
def decode_binary(binarystring):
"""Decodes a binary string into it's integer value."""
n = 0
for c in binarystring:
if c == '0':
d = 0
elif c == '1':
d = 1
else:
            raise ValueError('Not a binary number', binarystring)
        # Could use ((n << 1) | d), but python 2.3 issues a FutureWarning.
n = (n * 2) + d
return n
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1404-L1416
| 25 |
[
0,
1
] | 15.384615 |
[
2,
3,
4,
5,
6,
7,
9,
11,
12
] | 69.230769 | false | 14.825334 | 13 | 4 | 30.769231 | 1 |
def decode_binary(binarystring):
n = 0
for c in binarystring:
if c == '0':
d = 0
elif c == '1':
d = 1
else:
            raise ValueError('Not a binary number', binarystring)
        # Could use ((n << 1) | d), but python 2.3 issues a FutureWarning.
n = (n * 2) + d
return n
| 18,410 |
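Matching worked runs for the octal and binary variants in the two records above (same assumed import):
from akshare.utils.demjson import helpers  # assumed import path

assert helpers.decode_octal('17') == 15       # 1*8 + 7
assert helpers.decode_binary('1010') == 10    # 1*8 + 0*4 + 1*2 + 0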
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
helpers.format_timedelta_iso
|
(td)
|
return ''.join(a)
|
Encodes a datetime.timedelta into ISO-8601 Time Period format.
|
Encodes a datetime.timedelta into ISO-8601 Time Period format.
| 1,419 | 1,443 |
def format_timedelta_iso(td):
"""Encodes a datetime.timedelta into ISO-8601 Time Period format.
"""
d = td.days
s = td.seconds
ms = td.microseconds
m, s = divmod(s, 60)
h, m = divmod(m, 60)
a = ['P']
if d:
a.append('%dD' % d)
if h or m or s or ms:
a.append('T')
if h:
a.append('%dH' % h)
if m:
a.append('%dM' % m)
if s or ms:
if ms:
a.append('%d.%06d' % (s, ms))
else:
a.append('%d' % s)
if len(a) == 1:
a.append('T0S')
return ''.join(a)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1419-L1443
| 25 |
[
0,
1,
2
] | 12 |
[
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
21,
22,
23,
24
] | 84 | false | 14.825334 | 25 | 12 | 16 | 1 |
def format_timedelta_iso(td):
d = td.days
s = td.seconds
ms = td.microseconds
m, s = divmod(s, 60)
h, m = divmod(m, 60)
a = ['P']
if d:
a.append('%dD' % d)
if h or m or s or ms:
a.append('T')
if h:
a.append('%dH' % h)
if m:
a.append('%dM' % m)
if s or ms:
if ms:
a.append('%d.%06d' % (s, ms))
else:
a.append('%d' % s)
if len(a) == 1:
a.append('T0S')
return ''.join(a)
| 18,411 |
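A self-contained re-sketch of the duration formatting above, kept behaviorally aligned with the record (format_timedelta_iso_sketch is a name introduced for this example; note that the helper writes the seconds component without a trailing 'S' unit designator, and the asserts below follow that behavior):

import datetime

def format_timedelta_iso_sketch(td):
    # Mirrors the helper above: an optional days part, then a 'T' section
    # with hours, minutes, and a bare seconds/microseconds component.
    h, rem = divmod(td.seconds, 3600)
    m, s = divmod(rem, 60)
    parts = ['P']
    if td.days:
        parts.append('%dD' % td.days)
    if h or m or s or td.microseconds:
        parts.append('T')
        if h:
            parts.append('%dH' % h)
        if m:
            parts.append('%dM' % m)
        if s or td.microseconds:
            if td.microseconds:
                parts.append('%d.%06d' % (s, td.microseconds))
            else:
                parts.append('%d' % s)
    if len(parts) == 1:
        parts.append('T0S')
    return ''.join(parts)

assert format_timedelta_iso_sketch(datetime.timedelta(days=1, hours=2, minutes=3)) == 'P1DT2H3M'
assert format_timedelta_iso_sketch(datetime.timedelta(seconds=5)) == 'PT5'   # no trailing 'S', mirroring the helper
assert format_timedelta_iso_sketch(datetime.timedelta()) == 'PT0S'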
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.__init__
|
(self, offset=0, line=1, column=0, text_after=None)
| 1,478 | 1,484 |
def __init__(self, offset=0, line=1, column=0, text_after=None):
self.__char_position = offset
self.__line = line
self.__column = column
self.__text_after = text_after
self.__at_end = False
self.__last_was_cr = False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1478-L1484
| 25 |
[
0
] | 14.285714 |
[
1,
2,
3,
4,
5,
6
] | 85.714286 | false | 14.825334 | 7 | 1 | 14.285714 | 0 |
def __init__(self, offset=0, line=1, column=0, text_after=None):
self.__char_position = offset
self.__line = line
self.__column = column
self.__text_after = text_after
self.__at_end = False
self.__last_was_cr = False
| 18,412 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.line
|
(self)
|
return self.__line
|
The current line within the document, starts at 1.
|
The current line within the document, starts at 1.
| 1,487 | 1,489 |
def line(self):
"""The current line within the document, starts at 1."""
return self.__line
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1487-L1489
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def line(self):
return self.__line
| 18,413 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.column
|
(self)
|
return self.__column
|
The current character column from the beginning of the
document, starts at 0.
|
The current character column from the beginning of the
document, starts at 0.
| 1,492 | 1,496 |
def column(self):
"""The current character column from the beginning of the
document, starts at 0.
"""
return self.__column
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1492-L1496
| 25 |
[
0,
1,
2,
3
] | 80 |
[
4
] | 20 | false | 14.825334 | 5 | 1 | 80 | 2 |
def column(self):
return self.__column
| 18,414 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.char_position
|
(self)
|
return self.__char_position
|
The current character offset from the beginning of the
document, starts at 0.
|
The current character offset from the beginning of the
document, starts at 0.
| 1,499 | 1,503 |
def char_position(self):
"""The current character offset from the beginning of the
document, starts at 0.
"""
return self.__char_position
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1499-L1503
| 25 |
[
0,
1,
2,
3
] | 80 |
[
4
] | 20 | false | 14.825334 | 5 | 1 | 80 | 2 |
def char_position(self):
return self.__char_position
| 18,415 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.at_start
|
(self)
|
return (self.char_position == 0)
|
Returns True if the position is at the start of the document.
|
Returns True if the position is at the start of the document.
| 1,506 | 1,508 |
def at_start(self):
"""Returns True if the position is at the start of the document."""
return (self.char_position == 0)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1506-L1508
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def at_start(self):
return (self.char_position == 0)
| 18,416 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.at_end
|
(self)
|
return self.__at_end
|
Returns True if the position is at the end of the document.
This property must be set by the user.
|
Returns True if the position is at the end of the document.
| 1,511 | 1,516 |
def at_end(self):
"""Returns True if the position is at the end of the document.
This property must be set by the user.
"""
return self.__at_end
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1511-L1516
| 25 |
[
0,
1,
2,
3,
4
] | 83.333333 |
[
5
] | 16.666667 | false | 14.825334 | 6 | 1 | 83.333333 | 3 |
def at_end(self):
return self.__at_end
| 18,417 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.at_end
|
(self, b)
|
Sets the at_end property to True or False.
|
Sets the at_end property to True or False.
| 1,519 | 1,522 |
def at_end(self, b):
"""Sets the at_end property to True or False.
"""
self.__at_end = bool(b)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1519-L1522
| 25 |
[
0,
1,
2
] | 75 |
[
3
] | 25 | false | 14.825334 | 4 | 1 | 75 | 1 |
def at_end(self, b):
self.__at_end = bool(b)
| 18,418 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.text_after
|
(self)
|
return self.__text_after
|
Returns a textual excerpt starting at the current position.
This property must be set by the user.
|
Returns a textual excerpt starting at the current position.
| 1,525 | 1,530 |
def text_after(self):
"""Returns a textual excerpt starting at the current position.
This property must be set by the user.
"""
    return self.__text_after
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1525-L1530
| 25 |
[
0,
1,
2,
3,
4
] | 83.333333 |
[
5
] | 16.666667 | false | 14.825334 | 6 | 1 | 83.333333 | 3 |
def text_after(self):
    return self.__text_after
| 18,419 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.text_after
|
(self, value)
|
Sets the text_after property to a given string.
|
Sets the text_after property to a given string.
| 1,533 | 1,536 |
def text_after(self, value):
"""Sets the text_after property to a given string.
"""
self.__text_after = value
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1533-L1536
| 25 |
[
0,
1,
2
] | 75 |
[
3
] | 25 | false | 14.825334 | 4 | 1 | 75 | 1 |
def text_after(self, value):
self.__text_after = value
| 18,420 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.__repr__
|
(self)
|
return s
| 1,538 | 1,547 |
def __repr__(self):
s = "%s(offset=%r,line=%r,column=%r" \
% (self.__class__.__name__,
self.__char_position,
self.__line,
self.__column)
if self.text_after:
s += ",text_after=%r" % (self.text_after,)
s += ")"
return s
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1538-L1547
| 25 |
[
0
] | 10 |
[
1,
6,
7,
8,
9
] | 50 | false | 14.825334 | 10 | 2 | 50 | 0 |
def __repr__(self):
s = "%s(offset=%r,line=%r,column=%r" \
% (self.__class__.__name__,
self.__char_position,
self.__line,
self.__column)
if self.text_after:
s += ",text_after=%r" % (self.text_after,)
s += ")"
return s
| 18,421 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.describe
|
(self, show_text=True)
|
return s
|
Returns a human-readable description of the position, in English.
|
Returns a human-readable description of the position, in English.
| 1,549 | 1,560 |
def describe(self, show_text=True):
"""Returns a human-readable description of the position, in English."""
s = "line %d, column %d, offset %d" % (self.__line,
self.__column,
self.__char_position)
if self.at_start:
s += " (AT-START)"
elif self.at_end:
s += " (AT-END)"
if show_text and self.text_after:
s += ", text %r" % (self.text_after)
return s
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1549-L1560
| 25 |
[
0,
1
] | 16.666667 |
[
2,
5,
6,
7,
8,
9,
10,
11
] | 66.666667 | false | 14.825334 | 12 | 5 | 33.333333 | 1 |
def describe(self, show_text=True):
s = "line %d, column %d, offset %d" % (self.__line,
self.__column,
self.__char_position)
if self.at_start:
s += " (AT-START)"
elif self.at_end:
s += " (AT-END)"
if show_text and self.text_after:
s += ", text %r" % (self.text_after)
return s
| 18,422 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.__str__
|
(self)
|
return self.describe(show_text=True)
|
Same as the describe() function.
|
Same as the describe() function.
| 1,562 | 1,564 |
def __str__(self):
"""Same as the describe() function."""
return self.describe(show_text=True)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1562-L1564
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def __str__(self):
return self.describe(show_text=True)
| 18,423 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.copy
|
(self)
|
return p
|
Create a copy of the position object.
|
Create a copy of the position object.
| 1,566 | 1,575 |
def copy(self):
"""Create a copy of the position object."""
p = self.__class__()
p.__char_position = self.__char_position
p.__line = self.__line
p.__column = self.__column
p.text_after = self.__text_after
p.at_end = self.at_end
p.__last_was_cr = self.__last_was_cr
return p
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1566-L1575
| 25 |
[
0,
1
] | 20 |
[
2,
3,
4,
5,
6,
7,
8,
9
] | 80 | false | 14.825334 | 10 | 1 | 20 | 1 |
def copy(self):
p = self.__class__()
p.__char_position = self.__char_position
p.__line = self.__line
p.__column = self.__column
p.text_after = self.__text_after
p.at_end = self.at_end
p.__last_was_cr = self.__last_was_cr
return p
| 18,424 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.rewind
|
(self)
|
Set the position to the start of the document.
|
Set the position to the start of the document.
| 1,577 | 1,585 |
def rewind(self):
"""Set the position to the start of the document."""
if not self.at_start:
self.text_after = None
self.at_end = False
self.__char_position = 0
self.__line = 1
self.__column = 0
self.__last_was_cr = False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1577-L1585
| 25 |
[
0,
1
] | 22.222222 |
[
2,
3,
4,
5,
6,
7,
8
] | 77.777778 | false | 14.825334 | 9 | 2 | 22.222222 | 1 |
def rewind(self):
if not self.at_start:
self.text_after = None
self.at_end = False
self.__char_position = 0
self.__line = 1
self.__column = 0
self.__last_was_cr = False
| 18,425 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
position_marker.advance
|
(self, s)
|
Advance the position from its current place according to
the given string of characters.
|
Advance the position from its current place according to
the given string of characters.
| 1,587 | 1,604 |
def advance(self, s):
"""Advance the position from its current place according to
the given string of characters.
"""
if s:
self.text_after = None
for c in s:
self.__char_position += 1
if c == '\n' and self.__last_was_cr:
self.__last_was_cr = False
elif helpers.char_is_unicode_eol(c):
self.__line += 1
self.__column = 0
self.__last_was_cr = (c == '\r')
else:
self.__column += 1
self.__last_was_cr = False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1587-L1604
| 25 |
[
0,
1,
2,
3,
4
] | 27.777778 |
[
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
16,
17
] | 66.666667 | false | 14.825334 | 18 | 6 | 33.333333 | 2 |
def advance(self, s):
if s:
self.text_after = None
for c in s:
self.__char_position += 1
if c == '\n' and self.__last_was_cr:
self.__last_was_cr = False
elif helpers.char_is_unicode_eol(c):
self.__line += 1
self.__column = 0
self.__last_was_cr = (c == '\r')
else:
self.__column += 1
self.__last_was_cr = False
| 18,426 |
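position_marker.advance above maintains line/column bookkeeping while consuming characters, counting a '\r\n' pair as a single line break via the __last_was_cr flag. A minimal standalone sketch of just that bookkeeping (track_position is an illustrative name; char_position tracking and the wider Unicode end-of-line set handled by helpers.char_is_unicode_eol are omitted):

def track_position(text):
    line, column, last_was_cr = 1, 0, False
    for c in text:
        if c == '\n' and last_was_cr:
            last_was_cr = False              # second half of a CRLF pair: not a new line
        elif c in '\r\n':                    # the real helper also accepts Unicode EOLs
            line, column, last_was_cr = line + 1, 0, (c == '\r')
        else:
            column += 1
            last_was_cr = False
    return line, column

assert track_position('ab\r\ncd') == (2, 2)
assert track_position('ab\rcd\nef') == (3, 2)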
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.__init__
|
(self, txt='', encoding=None)
| 1,620 | 1,622 |
def __init__(self, txt='', encoding=None):
self.reset()
self.set_text(txt, encoding)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1620-L1622
| 25 |
[
0
] | 33.333333 |
[
1,
2
] | 66.666667 | false | 14.825334 | 3 | 1 | 33.333333 | 0 |
def __init__(self, txt='', encoding=None):
self.reset()
self.set_text(txt, encoding)
| 18,427 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.reset
|
(self)
|
Clears the state to nothing.
|
Clears the state to nothing.
| 1,624 | 1,635 |
def reset(self):
"""Clears the state to nothing."""
self.__pos = position_marker()
self.__saved_pos = [] # Stack of saved positions
self.__bom = helpers.make_raw_bytes([]) # contains copy of byte-order mark, if any
self.__codec = None # The CodecInfo
self.__encoding = None # The name of the codec's encoding
self.__input_is_bytes = False
self.__rawbuf = None
self.__raw_bytes = None
self.__cmax = 0
self.num_ws_skipped = 0
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1624-L1635
| 25 |
[
0,
1
] | 16.666667 |
[
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 83.333333 | false | 14.825334 | 12 | 1 | 16.666667 | 1 |
def reset(self):
self.__pos = position_marker()
self.__saved_pos = [] # Stack of saved positions
self.__bom = helpers.make_raw_bytes([]) # contains copy of byte-order mark, if any
self.__codec = None # The CodecInfo
self.__encoding = None # The name of the codec's encoding
self.__input_is_bytes = False
self.__rawbuf = None
self.__raw_bytes = None
self.__cmax = 0
self.num_ws_skipped = 0
| 18,428 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.save_position
|
(self)
|
return True
| 1,637 | 1,639 |
def save_position(self):
self.__saved_pos.append(self.__pos.copy())
return True
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1637-L1639
| 25 |
[
0
] | 33.333333 |
[
1,
2
] | 66.666667 | false | 14.825334 | 3 | 1 | 33.333333 | 0 |
def save_position(self):
self.__saved_pos.append(self.__pos.copy())
return True
| 18,429 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.clear_saved_position
|
(self)
| 1,641 | 1,646 |
def clear_saved_position(self):
if self.__saved_pos:
self.__saved_pos.pop()
return True
else:
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1641-L1646
| 25 |
[
0
] | 16.666667 |
[
1,
2,
3,
5
] | 66.666667 | false | 14.825334 | 6 | 2 | 33.333333 | 0 |
def clear_saved_position(self):
if self.__saved_pos:
self.__saved_pos.pop()
return True
else:
return False
| 18,430 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.restore_position
|
(self)
| 1,648 | 1,655 |
def restore_position(self):
try:
old_pos = self.__saved_pos.pop() # Can raise IndexError
except IndexError as err:
raise IndexError("Attempt to restore buffer position that was never saved")
else:
self.__pos = old_pos
return True
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1648-L1655
| 25 |
[
0
] | 12.5 |
[
1,
2,
3,
4,
6,
7
] | 75 | false | 14.825334 | 8 | 2 | 25 | 0 |
def restore_position(self):
try:
old_pos = self.__saved_pos.pop() # Can raise IndexError
except IndexError as err:
raise IndexError("Attempt to restore buffer position that was never saved")
else:
self.__pos = old_pos
return True
| 18,431 |
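The three buffered_stream methods above (save_position, clear_saved_position, restore_position) implement a stack of saved cursor positions so a parser can speculate and then either commit or backtrack. A minimal sketch of that protocol; PositionStack and its integer cursor are illustrative stand-ins for the real position_marker, not part of demjson:

class PositionStack:
    def __init__(self):
        self.pos = 0        # stand-in for the real position_marker state
        self._saved = []    # stack of saved positions

    def save_position(self):
        self._saved.append(self.pos)
        return True

    def clear_saved_position(self):
        # Drop the most recent save without moving the cursor (speculation succeeded).
        if self._saved:
            self._saved.pop()
            return True
        return False

    def restore_position(self):
        # Move the cursor back to the most recent save (speculation failed).
        if not self._saved:
            raise IndexError("Attempt to restore buffer position that was never saved")
        self.pos = self._saved.pop()
        return True

buf = PositionStack()
buf.save_position()
buf.pos = 42              # a speculative parse advanced the cursor
buf.restore_position()    # backtrack
assert buf.pos == 0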
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream._find_codec
|
(self, encoding)
|
return self.__codec
| 1,657 | 1,669 |
def _find_codec(self, encoding):
if encoding is None:
self.__codec = None
self.__encoding = None
elif isinstance(encoding, codecs.CodecInfo):
self.__codec = encoding
self.__encoding = self.__codec.name
else:
self.__encoding = encoding
self.__codec = helpers.lookup_codec(encoding)
if not self.__codec:
raise JSONDecodeError('no codec available for character encoding', encoding)
return self.__codec
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1657-L1669
| 25 |
[
0
] | 7.692308 |
[
1,
2,
3,
4,
5,
6,
8,
9,
10,
11,
12
] | 84.615385 | false | 14.825334 | 13 | 4 | 15.384615 | 0 |
def _find_codec(self, encoding):
if encoding is None:
self.__codec = None
self.__encoding = None
elif isinstance(encoding, codecs.CodecInfo):
self.__codec = encoding
self.__encoding = self.__codec.name
else:
self.__encoding = encoding
self.__codec = helpers.lookup_codec(encoding)
if not self.__codec:
raise JSONDecodeError('no codec available for character encoding', encoding)
return self.__codec
| 18,432 |
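_find_codec above normalizes three kinds of encoding arguments: None, an already-resolved codecs.CodecInfo, or an encoding name. A hedged standalone sketch of that dispatch using the standard codecs module directly (find_codec_sketch is an illustrative name, and helpers.lookup_codec is assumed here to behave roughly like codecs.lookup, which is not verified):

import codecs

def find_codec_sketch(encoding):
    # None -> no codec; CodecInfo -> use as-is; string -> resolve by name.
    if encoding is None:
        return None
    if isinstance(encoding, codecs.CodecInfo):
        return encoding
    return codecs.lookup(encoding)  # raises LookupError for unknown names

assert find_codec_sketch(None) is None
assert find_codec_sketch('utf-8').name == 'utf-8'
assert find_codec_sketch(codecs.lookup('ascii')).name == 'ascii'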
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.set_text
|
(self, txt, encoding=None)
|
Changes the input text document and rewinds the position to
the start of the new document.
|
Changes the input text document and rewinds the position to
the start of the new document.
| 1,671 | 1,698 |
def set_text(self, txt, encoding=None):
"""Changes the input text document and rewinds the position to
the start of the new document.
"""
import sys
self.rewind()
self.__codec = None
self.__bom = None
self.__rawbuf = ''
self.__cmax = 0 # max number of chars in input
try:
decoded = helpers.unicode_decode(txt, encoding)
except JSONError:
raise
except Exception as err:
# Re-raise as a JSONDecodeError
e2 = sys.exc_info()
newerr = JSONDecodeError("a Unicode decoding error occurred")
# Simulate Python 3's: "raise X from Y" exception chaining
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
raise newerr
else:
self.__codec = decoded.codec
self.__bom = decoded.bom
self.__rawbuf = decoded.string
self.__cmax = len(self.__rawbuf)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1671-L1698
| 25 |
[
0,
1,
2,
3,
4
] | 17.857143 |
[
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
17,
18,
20,
21,
22,
24,
25,
26,
27
] | 71.428571 | false | 14.825334 | 28 | 3 | 28.571429 | 2 |
def set_text(self, txt, encoding=None):
import sys
self.rewind()
self.__codec = None
self.__bom = None
self.__rawbuf = ''
self.__cmax = 0 # max number of chars in input
try:
decoded = helpers.unicode_decode(txt, encoding)
except JSONError:
raise
except Exception as err:
# Re-raise as a JSONDecodeError
e2 = sys.exc_info()
newerr = JSONDecodeError("a Unicode decoding error occurred")
# Simulate Python 3's: "raise X from Y" exception chaining
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
raise newerr
else:
self.__codec = decoded.codec
self.__bom = decoded.bom
self.__rawbuf = decoded.string
self.__cmax = len(self.__rawbuf)
| 18,433 |
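set_text above wraps any low-level Unicode decoding failure in a JSONDecodeError while chaining the original exception by hand (the "raise X from Y" simulation via __cause__ and __traceback__). A small self-contained sketch of that chaining pattern, with ValueError standing in for JSONDecodeError and a plain bytes.decode call standing in for helpers.unicode_decode:

import sys

def decode_or_wrap(raw_bytes):
    try:
        return raw_bytes.decode('utf-8')
    except Exception as err:
        # Re-raise as the domain error, keeping the original exception attached.
        exc_info = sys.exc_info()
        newerr = ValueError("a Unicode decoding error occurred")
        newerr.__cause__ = err
        newerr.__traceback__ = exc_info[2]
        raise newerr

try:
    decode_or_wrap(b'\xff\xfe\xfa')
except ValueError as wrapped:
    assert isinstance(wrapped.__cause__, UnicodeDecodeError)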
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.__repr__
|
(self)
|
return '<%s at %r text %r>' % (self.__class__.__name__, self.__pos, self.text_context)
| 1,700 | 1,701 |
def __repr__(self):
return '<%s at %r text %r>' % (self.__class__.__name__, self.__pos, self.text_context)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1700-L1701
| 25 |
[
0
] | 50 |
[
1
] | 50 | false | 14.825334 | 2 | 1 | 50 | 0 |
def __repr__(self):
return '<%s at %r text %r>' % (self.__class__.__name__, self.__pos, self.text_context)
| 18,434 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.rewind
|
(self)
|
Resets the position back to the start of the input text.
|
Resets the position back to the start of the input text.
| 1,703 | 1,705 |
def rewind(self):
"""Resets the position back to the start of the input text."""
self.__pos.rewind()
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1703-L1705
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def rewind(self):
self.__pos.rewind()
| 18,435 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.codec
|
(self)
|
return self.__codec
|
The codec object used to perform Unicode decoding, or None.
|
The codec object used to perform Unicode decoding, or None.
| 1,708 | 1,710 |
def codec(self):
"""The codec object used to perform Unicode decoding, or None."""
return self.__codec
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1708-L1710
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def codec(self):
return self.__codec
| 18,436 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.bom
|
(self)
|
return self.__bom
|
The Unicode Byte-Order Mark (BOM), if any, that was present
at the start of the input text. The returned BOM is a string
of the raw bytes, and is not Unicode-decoded.
|
The Unicode Byte-Order Mark (BOM), if any, that was present
at the start of the input text. The returned BOM is a string
of the raw bytes, and is not Unicode-decoded.
| 1,713 | 1,719 |
def bom(self):
"""The Unicode Byte-Order Mark (BOM), if any, that was present
at the start of the input text. The returned BOM is a string
of the raw bytes, and is not Unicode-decoded.
"""
return self.__bom
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1713-L1719
| 25 |
[
0,
1,
2,
3,
4,
5
] | 85.714286 |
[
6
] | 14.285714 | false | 14.825334 | 7 | 1 | 85.714286 | 3 |
def bom(self):
return self.__bom
| 18,437 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
buffered_stream.cpos
|
(self)
|
return self.__pos.char_position
|
The current character offset from the start of the document.
|
The current character offset from the start of the document.
| 1,722 | 1,724 |
def cpos(self):
"""The current character offset from the start of the document."""
return self.__pos.char_position
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L1722-L1724
| 25 |
[
0,
1
] | 66.666667 |
[
2
] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def cpos(self):
return self.__pos.char_position
| 18,438 |