Dataset schema (column name, dtype, and observed value range):

| column | dtype | observed values |
| --- | --- | --- |
| nwo | string | lengths 10 to 28 |
| sha | string | lengths 40 to 40 |
| path | string | lengths 11 to 97 |
| identifier | string | lengths 1 to 64 |
| parameters | string | lengths 2 to 2.24k |
| return_statement | string | lengths 0 to 2.17k |
| docstring | string | lengths 0 to 5.45k |
| docstring_summary | string | lengths 0 to 3.83k |
| func_begin | int64 | 1 to 13.4k |
| func_end | int64 | 2 to 13.4k |
| function | string | lengths 28 to 56.4k |
| url | string | lengths 106 to 209 |
| project | int64 | 1 to 48 |
| executed_lines | list | line indices |
| executed_lines_pc | float64 | 0 to 153 |
| missing_lines | list | line indices |
| missing_lines_pc | float64 | 0 to 100 |
| covered | bool | 2 classes |
| filecoverage | float64 | 2.53 to 100 |
| function_lines | int64 | 2 to 1.46k |
| mccabe | int64 | 1 to 253 |
| coverage | float64 | 0 to 100 |
| docstring_lines | int64 | 0 to 112 |
| function_nodoc | string | lengths 9 to 56.4k |
| id | int64 | 0 to 29.8k |

The records below are shown one field per line.
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_us_sina.py
identifier: get_us_stock_name
parameters: ()
return_statement: return big_df[["name", "cname", "symbol"]]
docstring:
    U.S. stocks' English name, Chinese name, and symbol.
    Use the returned symbol as the input to the next function.
    https://finance.sina.com.cn/stock/usstock/sector.shtml
    :return: each stock's English name, Chinese name, and symbol
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 52 | func_end: 81
function:
def get_us_stock_name() -> pd.DataFrame:
"""
u.s. stock's english name, chinese name and symbol
you should use symbol to get apply into the next function
https://finance.sina.com.cn/stock/usstock/sector.shtml
:return: stock's english name, chinese name and symbol
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_us_page_count()
for page in tqdm(range(1, page_count + 1)):
# page = "1"
us_js_decode = "US_CategoryService.getList?page={}&num=20&sort=&asc=0&market=&id=".format(
page
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json["data"])], ignore_index=True
)
return big_df[["name", "cname", "symbol"]]
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_sina.py#L52-L81
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7] | executed_lines_pc: 26.666667
missing_lines: [8, 9, 10, 12, 15, 16, 17, 18, 19, 23, 26, 29] | missing_lines_pc: 40
covered: false | filecoverage: 10.447761 | function_lines: 30 | mccabe: 2 | coverage: 60 | docstring_lines: 5
function_nodoc:
def get_us_stock_name() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = get_us_page_count()
for page in tqdm(range(1, page_count + 1)):
# page = "1"
us_js_decode = "US_CategoryService.getList?page={}&num=20&sort=&asc=0&market=&id=".format(
page
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json["data"])], ignore_index=True
)
return big_df[["name", "cname", "symbol"]]
id: 18841
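The slice in the function above, `res.text[res.text.find("({") + 1 : res.text.rfind(");")]`, unwraps Sina's JSONP envelope before parsing. A minimal offline sketch of that step with a synthetic payload (no network involved):

```python
import json

# Synthetic JSONP payload shaped like the Sina response: callback(JSON);
text = 'IO.XSRV2.CallbackList["x"]({"count": "42", "data": [{"symbol": "AAPL"}]});'

# Keep everything between the opening "({" and the trailing ");".
payload = json.loads(text[text.find("({") + 1 : text.rfind(");")])
print(payload["data"][0]["symbol"])  # -> AAPL
```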
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_us_sina.py
identifier: stock_us_spot
parameters: ()
return_statement: return big_df
docstring:
    Sina Finance - quotes for all U.S. stocks; note the 15-minute delay.
    https://finance.sina.com.cn/stock/usstock/sector.shtml
    :return: real-time quotes for all U.S. stocks
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 84 | func_end: 112
function:
def stock_us_spot() -> pd.DataFrame:
"""
新浪财经-所有美股的数据, 注意延迟 15 分钟
https://finance.sina.com.cn/stock/usstock/sector.shtml
:return: 美股所有股票实时行情
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_us_page_count()
for page in tqdm(range(1, page_count + 1)):
# page = "1"
us_js_decode = "US_CategoryService.getList?page={}&num=20&sort=&asc=0&market=&id=".format(
page
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json["data"])], ignore_index=True
)
return big_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_sina.py#L84-L112
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6] | executed_lines_pc: 24.137931
missing_lines: [7, 8, 9, 11, 14, 15, 16, 17, 18, 22, 25, 28] | missing_lines_pc: 41.37931
covered: false | filecoverage: 10.447761 | function_lines: 29 | mccabe: 2 | coverage: 58.62069 | docstring_lines: 4
function_nodoc:
def stock_us_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = get_us_page_count()
for page in tqdm(range(1, page_count + 1)):
# page = "1"
us_js_decode = "US_CategoryService.getList?page={}&num=20&sort=&asc=0&market=&id=".format(
page
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_hash_text)
dict_list = js_code.call("d", us_js_decode) # 执行js解密代码
us_sina_stock_dict_payload.update({"page": "{}".format(page)})
res = requests.get(
us_sina_stock_list_url.format(dict_list),
params=us_sina_stock_dict_payload,
)
data_json = json.loads(
res.text[res.text.find("({") + 1 : res.text.rfind(");")]
)
big_df = pd.concat(
[big_df, pd.DataFrame(data_json["data"])], ignore_index=True
)
return big_df
id: 18842
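A minimal usage sketch, assuming akshare is installed and exposes the function at the package top level (the `ak.` prefix follows the convention shown in stock_us_fundamental's docstring); the column selection assumes the payload carries the same name/cname/symbol fields that get_us_stock_name extracts:

```python
import akshare as ak

# Pages through the whole U.S. list with a progress bar; this takes a while.
spot_df = ak.stock_us_spot()
print(spot_df[["name", "cname", "symbol"]].head())
```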
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_us_sina.py
identifier: stock_us_daily
parameters: (symbol: str = "FB", adjust: str = "")
return_statement: (empty)
docstring:
    Sina Finance - U.S. stocks
    https://finance.sina.com.cn/stock/usstock/sector.shtml
    Notes:
    1. CIEN: Sina's adjustment factor is wrong
    2. AI: Sina's adjustment factor is wrong; the stock was newly listed and never adjusted, yet a factor is returned
    :param symbol: can be obtained via get_us_stock_name
    :type symbol: str
    :param adjust: "": unadjusted data; qfq: forward-adjusted data; qfq-factor: forward-adjustment factors and offsets
    :type adjust: str
    :return: data for the given adjust
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 115 | func_end: 211
function:
def stock_us_daily(symbol: str = "FB", adjust: str = "") -> pd.DataFrame:
"""
新浪财经-美股
https://finance.sina.com.cn/stock/usstock/sector.shtml
备注:
1. CIEN 新浪复权因子错误
2. AI 新浪复权因子错误, 该股票刚上市未发生复权, 但是返回复权因子
:param symbol: 可以使用 get_us_stock_name 获取
:type symbol: str
:param adjust: "": 返回未复权的数据 ; qfq: 返回前复权后的数据; qfq-factor: 返回前复权因子和调整;
:type adjust: str
:return: 指定 adjust 的数据
:rtype: pandas.DataFrame
"""
url = f"https://finance.sina.com.cn/staticdata/us/{symbol}"
res = requests.get(url)
js_code = py_mini_racer.MiniRacer()
js_code.eval(zh_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = pd.to_datetime(data_df["date"]).dt.date
data_df.index = pd.to_datetime(data_df["date"])
del data_df["amount"]
del data_df["date"]
data_df = data_df.astype("float")
url = us_sina_stock_hist_qfq_url.format(symbol)
res = requests.get(url)
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.rename(
columns={
"c": "adjust",
"d": "date",
"f": "qfq_factor",
},
inplace=True,
)
qfq_factor_df.index = pd.to_datetime(qfq_factor_df["date"])
del qfq_factor_df["date"]
# 处理复权因子
temp_date_range = pd.date_range(
"1900-01-01", qfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1, 2]]
if adjust == "qfq":
if len(new_range) == 1:
new_range.index.values[0] = pd.to_datetime(
str(data_df.index.date[0])
)
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.fillna(method="bfill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = (
temp_df["open"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["high"] = (
temp_df["high"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["close"] = (
temp_df["close"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["low"] = (
temp_df["low"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df = temp_df.astype("float")
# 处理复权因子错误的情况-开始
check_df = temp_df[["open", "high", "low", "close"]].copy()
check_df.dropna(inplace=True)
if check_df.empty:
data_df.reset_index(inplace=True)
return data_df
# 处理复权因子错误的情况-结束
result_data = temp_df.iloc[:, :-2]
result_data.reset_index(inplace=True)
return result_data
if adjust == "qfq-factor":
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust == "":
data_df.reset_index(inplace=True)
return data_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_sina.py#L115-L211
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] | executed_lines_pc: 14.43299
missing_lines: [14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 29, 32, 40, 41, 44, 47, 48, 51, 52, 54, 55, 56, 59, 62, 63, 64, 65, 68, 71, 74, 77, 78, 80, 81, 82, 83, 84, 86, 87, 88, 90, 91, 92, 94, 95, 96] | missing_lines_pc: 50.515464
covered: false | filecoverage: 10.447761 | function_lines: 97 | mccabe: 6 | coverage: 49.484536 | docstring_lines: 11
function_nodoc:
def stock_us_daily(symbol: str = "FB", adjust: str = "") -> pd.DataFrame:
url = f"https://finance.sina.com.cn/staticdata/us/{symbol}"
res = requests.get(url)
js_code = py_mini_racer.MiniRacer()
js_code.eval(zh_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = pd.to_datetime(data_df["date"]).dt.date
data_df.index = pd.to_datetime(data_df["date"])
del data_df["amount"]
del data_df["date"]
data_df = data_df.astype("float")
url = us_sina_stock_hist_qfq_url.format(symbol)
res = requests.get(url)
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.rename(
columns={
"c": "adjust",
"d": "date",
"f": "qfq_factor",
},
inplace=True,
)
qfq_factor_df.index = pd.to_datetime(qfq_factor_df["date"])
del qfq_factor_df["date"]
# 处理复权因子
temp_date_range = pd.date_range(
"1900-01-01", qfq_factor_df.index[0].isoformat()
)
temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
new_range = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
new_range = new_range.fillna(method="ffill")
new_range = new_range.iloc[:, [1, 2]]
if adjust == "qfq":
if len(new_range) == 1:
new_range.index.values[0] = pd.to_datetime(
str(data_df.index.date[0])
)
temp_df = pd.merge(
data_df, new_range, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df.fillna(method="bfill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = (
temp_df["open"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["high"] = (
temp_df["high"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["close"] = (
temp_df["close"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df["low"] = (
temp_df["low"] * temp_df["qfq_factor"] + temp_df["adjust"]
)
temp_df = temp_df.apply(lambda x: round(x, 4))
temp_df = temp_df.astype("float")
# 处理复权因子错误的情况-开始
check_df = temp_df[["open", "high", "low", "close"]].copy()
check_df.dropna(inplace=True)
if check_df.empty:
data_df.reset_index(inplace=True)
return data_df
# 处理复权因子错误的情况-结束
result_data = temp_df.iloc[:, :-2]
result_data.reset_index(inplace=True)
return result_data
if adjust == "qfq-factor":
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust == "":
data_df.reset_index(inplace=True)
return data_df
id: 18843
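Forward adjustment above is computed per OHLC column as price * qfq_factor + adjust; note that an adjust value outside {"", "qfq", "qfq-factor"} matches no branch, so the function implicitly returns None. A usage sketch, assuming akshare is installed and Sina's endpoints are unchanged:

```python
import akshare as ak

raw_df = ak.stock_us_daily(symbol="AAPL")                          # unadjusted
qfq_df = ak.stock_us_daily(symbol="AAPL", adjust="qfq")            # price * factor + offset
factor_df = ak.stock_us_daily(symbol="AAPL", adjust="qfq-factor")  # factors only
```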
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_us_sina.py
identifier: stock_us_fundamental
parameters: (stock: str = "GOOGL", symbol: str = "info")
return_statement: (empty)
docstring:
    U.S. stock fundamentals
    https://www.macrotrends.net/stocks/stock-screener
    :param stock: U.S. ticker; call **ak.stock_us_fundamental(symbol="info")** to list all tickers
    :type stock: str
    :param symbol: info: return the full U.S. stock list; PE: return PE data; PB: return PB data
    :type symbol: str
    :return: fundamentals for the given stock
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 214 | func_end: 260
function:
def stock_us_fundamental(
stock: str = "GOOGL", symbol: str = "info"
) -> pd.DataFrame:
"""
美股财务指标
https://www.macrotrends.net/stocks/stock-screener
:param stock: 美股 ticker, 可以通过调用 **ak.stock_us_fundamental(symbol="info")** 获取所有 ticker
:type stock: str
:param symbol: info: 返回所有美股列表, PE: 返回 PE 数据, PB: 返回 PB 数据
:type symbol: str
:return: 指定股票的财务数据
:rtype: pandas.DataFrame
"""
url = "https://www.macrotrends.net/stocks/stock-screener"
r = requests.get(url)
temp_text = r.text[
r.text.find("originalData") + 15 : r.text.find("filterArray") - 8
]
data_json = json.loads(temp_text)
temp_df = pd.DataFrame(data_json)
if symbol == "info":
del temp_df["name_link"]
return temp_df
else:
need_df = temp_df[temp_df["ticker"] == stock]
soup = BeautifulSoup(need_df["name_link"].values[0], "lxml")
base_url = "https://www.macrotrends.net" + soup.find("a")["href"]
if symbol == "PE":
url = base_url.rsplit("/", maxsplit=1)[0] + "/pe-ratio"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"date",
"stock_price",
"ttm_net_eps",
"pe_ratio",
]
return temp_df
elif symbol == "PB":
url = base_url.rsplit("/", maxsplit=1)[0] + "/price-book"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"date",
"stock_price",
"book_value_per_share",
"price_to_book_ratio",
]
return temp_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_sina.py#L214-L260
project: 25
executed_lines: [0] | executed_lines_pc: 2.12766
missing_lines: [13, 14, 15, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 36, 37, 38, 39, 40, 46] | missing_lines_pc: 44.680851
covered: false | filecoverage: 10.447761 | function_lines: 47 | mccabe: 4 | coverage: 55.319149 | docstring_lines: 8
function_nodoc:
def stock_us_fundamental(
stock: str = "GOOGL", symbol: str = "info"
) -> pd.DataFrame:
url = "https://www.macrotrends.net/stocks/stock-screener"
r = requests.get(url)
temp_text = r.text[
r.text.find("originalData") + 15 : r.text.find("filterArray") - 8
]
data_json = json.loads(temp_text)
temp_df = pd.DataFrame(data_json)
if symbol == "info":
del temp_df["name_link"]
return temp_df
else:
need_df = temp_df[temp_df["ticker"] == stock]
soup = BeautifulSoup(need_df["name_link"].values[0], "lxml")
base_url = "https://www.macrotrends.net" + soup.find("a")["href"]
if symbol == "PE":
url = base_url.rsplit("/", maxsplit=1)[0] + "/pe-ratio"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"date",
"stock_price",
"ttm_net_eps",
"pe_ratio",
]
return temp_df
elif symbol == "PB":
url = base_url.rsplit("/", maxsplit=1)[0] + "/price-book"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"date",
"stock_price",
"book_value_per_share",
"price_to_book_ratio",
]
return temp_df
id: 18844
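A usage sketch, assuming the Macrotrends page still matches the string offsets hard-coded above; as with stock_us_daily, a symbol outside {"info", "PE", "PB"} matches no branch and yields None:

```python
import akshare as ak

info_df = ak.stock_us_fundamental(symbol="info")             # full ticker list
pe_df = ak.stock_us_fundamental(stock="GOOGL", symbol="PE")  # PE ratio history
pb_df = ak.stock_us_fundamental(stock="GOOGL", symbol="PB")  # price/book history
```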
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_zh_kcb_sina.py
identifier: get_zh_kcb_page_count
parameters: ()
return_statement: (empty)
docstring:
    Total page count across all stocks
    http://vip.stock.finance.sina.com.cn/mkt/#hs_a
    :return: int, the total number of pages of stocks to fetch
docstring_summary: identical to docstring
func_begin: 26 | func_end: 37
function:
def get_zh_kcb_page_count() -> int:
"""
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: int 需要抓取的股票总页数
"""
res = requests.get(zh_sina_kcb_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_kcb_sina.py#L26-L37
project: 25
executed_lines: [0, 1, 2, 3, 4, 5] | executed_lines_pc: 50
missing_lines: [6, 7, 8, 9, 11] | missing_lines_pc: 41.666667
covered: false | filecoverage: 9.52381 | function_lines: 12 | mccabe: 2 | coverage: 58.333333 | docstring_lines: 3
function_nodoc:
def get_zh_kcb_page_count() -> int:
res = requests.get(zh_sina_kcb_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
id: 18845
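In Python 3, `int(...) / 80` always produces a float, so the `isinstance(page_count, int)` branch above can never fire: the function always returns `int(page_count) + 1`, which overcounts by one page whenever the stock count is an exact multiple of 80. A sketch of the rounding presumably intended (an illustration, not a patch from the repo):

```python
import math

def ceil_page_count(total_stocks: int, per_page: int = 80) -> int:
    # Round up so a partial last page is still fetched, without the
    # off-by-one on exact multiples that the float/isinstance version has.
    return math.ceil(total_stocks / per_page)

print(ceil_page_count(160))  # 2; the original returns 3 for the same input
```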
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_zh_kcb_sina.py
identifier: stock_zh_kcb_spot
parameters: ()
return_statement: return big_df
docstring:
    Sina Finance - real-time STAR Market quotes; heavy scraping tends to get the IP banned
    https://vip.stock.finance.sina.com.cn/mkt/#kcb
    :return: real-time STAR Market quotes
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 40 | func_end: 116
function:
def stock_zh_kcb_spot() -> pd.DataFrame:
"""
新浪财经-科创板实时行情数据, 大量抓取容易封IP
https://vip.stock.finance.sina.com.cn/mkt/#kcb
:return: 科创板实时行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_zh_kcb_page_count()
zh_sina_stock_payload_copy = zh_sina_kcb_stock_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_stock_payload_copy.update({"page": page})
zh_sina_stock_payload_copy.update({"_s_r_a": "page"})
res = requests.get(zh_sina_kcb_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df.columns = [
"代码",
"-",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'时点',
'市盈率',
'市净率',
'流通市值',
'总市值',
'换手率',
]
big_df = big_df[[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'时点',
'市盈率',
'市净率',
'流通市值',
'总市值',
'换手率',
]]
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['涨跌额'] = pd.to_numeric(big_df['涨跌额'])
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'])
big_df['买入'] = pd.to_numeric(big_df['买入'])
big_df['卖出'] = pd.to_numeric(big_df['卖出'])
big_df['昨收'] = pd.to_numeric(big_df['昨收'])
big_df['今开'] = pd.to_numeric(big_df['今开'])
big_df['最高'] = pd.to_numeric(big_df['最高'])
big_df['最低'] = pd.to_numeric(big_df['最低'])
big_df['成交量'] = pd.to_numeric(big_df['成交量'])
big_df['成交额'] = pd.to_numeric(big_df['成交额'])
big_df['市盈率'] = pd.to_numeric(big_df['市盈率'])
big_df['市净率'] = pd.to_numeric(big_df['市净率'])
big_df['流通市值'] = pd.to_numeric(big_df['流通市值'])
big_df['总市值'] = pd.to_numeric(big_df['总市值'])
big_df['换手率'] = pd.to_numeric(big_df['换手率'])
return big_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_kcb_sina.py#L40-L116
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6] | executed_lines_pc: 9.090909
missing_lines: [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 38, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76] | missing_lines_pc: 36.363636
covered: false | filecoverage: 9.52381 | function_lines: 77 | mccabe: 2 | coverage: 63.636364 | docstring_lines: 4
function_nodoc:
def stock_zh_kcb_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = get_zh_kcb_page_count()
zh_sina_stock_payload_copy = zh_sina_kcb_stock_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_stock_payload_copy.update({"page": page})
zh_sina_stock_payload_copy.update({"_s_r_a": "page"})
res = requests.get(zh_sina_kcb_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df.columns = [
"代码",
"-",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'时点',
'市盈率',
'市净率',
'流通市值',
'总市值',
'换手率',
]
big_df = big_df[[
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'时点',
'市盈率',
'市净率',
'流通市值',
'总市值',
'换手率',
]]
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['涨跌额'] = pd.to_numeric(big_df['涨跌额'])
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'])
big_df['买入'] = pd.to_numeric(big_df['买入'])
big_df['卖出'] = pd.to_numeric(big_df['卖出'])
big_df['昨收'] = pd.to_numeric(big_df['昨收'])
big_df['今开'] = pd.to_numeric(big_df['今开'])
big_df['最高'] = pd.to_numeric(big_df['最高'])
big_df['最低'] = pd.to_numeric(big_df['最低'])
big_df['成交量'] = pd.to_numeric(big_df['成交量'])
big_df['成交额'] = pd.to_numeric(big_df['成交额'])
big_df['市盈率'] = pd.to_numeric(big_df['市盈率'])
big_df['市净率'] = pd.to_numeric(big_df['市净率'])
big_df['流通市值'] = pd.to_numeric(big_df['流通市值'])
big_df['总市值'] = pd.to_numeric(big_df['总市值'])
big_df['换手率'] = pd.to_numeric(big_df['换手率'])
return big_df
id: 18846
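A usage sketch, assuming akshare is installed; the docstring warns that heavy scraping tends to get the IP banned, so throttle repeated calls:

```python
import akshare as ak

# One call pages through the whole STAR Market list.
kcb_spot_df = ak.stock_zh_kcb_spot()
print(kcb_spot_df[["代码", "名称", "最新价", "涨跌幅"]].head())
```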
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_zh_kcb_sina.py
identifier: stock_zh_kcb_daily
parameters: (symbol: str = "sh688399", adjust: str = "")
return_statement: (empty)
docstring:
    Sina Finance - historical quotes for STAR Market stocks; heavy scraping tends to get the IP banned
    https://finance.sina.com.cn/realstock/company/sh688005/nc.shtml
    :param symbol: stock code, prefixed with its market identifier
    :type symbol: str
    :param adjust: default "" returns unadjusted data; qfq: forward-adjusted data; hfq: back-adjusted data; hfq-factor: back-adjustment factors; qfq-factor: forward-adjustment factors
    :type adjust: str
    :return: historical quotes for the STAR Market stock
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 119 | func_end: 238
function:
def stock_zh_kcb_daily(symbol: str = "sh688399", adjust: str = "") -> pd.DataFrame:
"""
新浪财经-科创板股票的历史行情数据, 大量抓取容易封IP
https://finance.sina.com.cn/realstock/company/sh688005/nc.shtml
:param symbol: 股票代码; 带市场标识的股票代码
:type symbol: str
:param adjust: 默认不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子
:type adjust: str
:return: 科创板股票的历史行情数据
:rtype: pandas.DataFrame
"""
res = requests.get(
zh_sina_kcb_stock_hist_url.format(
symbol, datetime.datetime.now().strftime("%Y_%m_%d"), symbol
)
)
data_json = demjson.decode(res.text[res.text.find("[") : res.text.rfind("]") + 1])
data_df = pd.DataFrame(data_json)
data_df.index = pd.to_datetime(data_df["d"])
data_df.index.name = "date"
del data_df["d"]
r = requests.get(zh_sina_kcb_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("[") : r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["v"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"after_volume",
"after_amount",
"outstanding_share",
"turnover",
]
if not adjust:
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_kcb_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_kcb_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "hfq-factor":
res = requests.get(zh_sina_kcb_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
hfq_factor_df['date'] = pd.to_datetime(hfq_factor_df['date']).dt.date
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_kcb_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
qfq_factor_df['date'] = pd.to_datetime(qfq_factor_df['date']).dt.date
return qfq_factor_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_kcb_sina.py#L119-L238
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] | executed_lines_pc: 9.166667
missing_lines: [11, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 30, 31, 32, 33, 34, 46, 47, 48, 49, 51, 52, 53, 56, 57, 58, 60, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 79, 80, 81, 83, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 97, 98, 99, 102, 103, 104, 105, 106, 107, 109, 110, 111, 114, 115, 116, 117, 118, 119] | missing_lines_pc: 60.833333
covered: false | filecoverage: 9.52381 | function_lines: 120 | mccabe: 6 | coverage: 39.166667 | docstring_lines: 8
function_nodoc:
def stock_zh_kcb_daily(symbol: str = "sh688399", adjust: str = "") -> pd.DataFrame:
res = requests.get(
zh_sina_kcb_stock_hist_url.format(
symbol, datetime.datetime.now().strftime("%Y_%m_%d"), symbol
)
)
data_json = demjson.decode(res.text[res.text.find("[") : res.text.rfind("]") + 1])
data_df = pd.DataFrame(data_json)
data_df.index = pd.to_datetime(data_df["d"])
data_df.index.name = "date"
del data_df["d"]
r = requests.get(zh_sina_kcb_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("[") : r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["v"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"after_volume",
"after_amount",
"outstanding_share",
"turnover",
]
if not adjust:
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_kcb_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_kcb_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df.reset_index(inplace=True)
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
return temp_df
if adjust == "hfq-factor":
res = requests.get(zh_sina_kcb_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
hfq_factor_df['date'] = pd.to_datetime(hfq_factor_df['date']).dt.date
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_kcb_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
qfq_factor_df['date'] = pd.to_datetime(qfq_factor_df['date']).dt.date
return qfq_factor_df
id: 18847
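The two adjusted branches above are symmetric: "hfq" multiplies OHLC by the factor while "qfq" divides by it, and the "-factor" variants return the factor tables themselves. A usage sketch, assuming akshare is installed:

```python
import akshare as ak

raw_df = ak.stock_zh_kcb_daily(symbol="sh688399")                # unadjusted
hfq_df = ak.stock_zh_kcb_daily(symbol="sh688399", adjust="hfq")  # OHLC * hfq_factor
qfq_df = ak.stock_zh_kcb_daily(symbol="sh688399", adjust="qfq")  # OHLC / qfq_factor
```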
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_board_industry_em.py
identifier: stock_board_industry_name_em
parameters: ()
return_statement: return temp_df
docstring:
    Eastmoney - SH/SZ boards - industry boards - board names
    http://quote.eastmoney.com/center/boardlist.html#industry_board
    :return: industry board names
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 12 | func_end: 106
function:
def stock_board_industry_name_em() -> pd.DataFrame:
"""
东方财富网-沪深板块-行业板块-名称
http://quote.eastmoney.com/center/boardlist.html#industry_board
:return: 行业板块-名称
:rtype: pandas.DataFrame
"""
url = "http://17.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:2 f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f124,f107,f104,f105,f140,f141,f207,f208,f209,f222",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"排名",
"-",
"最新价",
"涨跌幅",
"涨跌额",
"-",
"_",
"-",
"换手率",
"-",
"-",
"-",
"板块代码",
"-",
"板块名称",
"-",
"-",
"-",
"-",
"总市值",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"上涨家数",
"下跌家数",
"-",
"-",
"-",
"领涨股票",
"-",
"-",
"领涨股票-涨跌幅",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"], errors="coerce")
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"], errors="coerce")
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"], errors="coerce")
return temp_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_industry_em.py#L12-L106
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6] | executed_lines_pc: 7.368421
missing_lines: [7, 8, 21, 22, 23, 24, 25, 26, 70, 86, 87, 88, 89, 90, 91, 92, 93, 94] | missing_lines_pc: 18.947368
covered: false | filecoverage: 7.920792 | function_lines: 95 | mccabe: 1 | coverage: 81.052632 | docstring_lines: 4
function_nodoc:
def stock_board_industry_name_em() -> pd.DataFrame:
url = "http://17.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:2 f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f124,f107,f104,f105,f140,f141,f207,f208,f209,f222",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"排名",
"-",
"最新价",
"涨跌幅",
"涨跌额",
"-",
"_",
"-",
"换手率",
"-",
"-",
"-",
"板块代码",
"-",
"板块名称",
"-",
"-",
"-",
"-",
"总市值",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"上涨家数",
"下跌家数",
"-",
"-",
"-",
"领涨股票",
"-",
"-",
"领涨股票-涨跌幅",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"], errors="coerce")
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"], errors="coerce")
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"], errors="coerce")
return temp_df
id: 18848
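This listing doubles as the lookup table that the board functions below use to turn a board name into its BK code. A usage sketch, assuming akshare is installed:

```python
import akshare as ak

boards_df = ak.stock_board_industry_name_em()
# Resolve a board name to its code the same way the functions below do.
code = boards_df.loc[boards_df["板块名称"] == "小金属", "板块代码"].values[0]
print(code)  # "BK1027" per the docstring URL for this board
```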
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_board_industry_em.py
identifier: stock_board_industry_hist_em
parameters: (symbol: str = "小金属", start_date: str = "20211201", end_date: str = "20220401", period: str = "日k", adjust: str = "")
return_statement: return temp_df
docstring:
    Eastmoney - SH/SZ boards - industry boards - historical quotes
    https://quote.eastmoney.com/bk/90.BK1027.html
    :param symbol: board name
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param period: K-line period; choice of {"日k", "周k", "月k"}
    :type period: str
    :param adjust: choice of {"": unadjusted, "qfq": forward-adjusted, "hfq": back-adjusted}
    :type adjust: str
    :return: historical quotes
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 109 | func_end: 199
function:
def stock_board_industry_hist_em(
symbol: str = "小金属",
start_date: str = "20211201",
end_date: str = "20220401",
period: str = "日k",
adjust: str = "",
) -> pd.DataFrame:
"""
东方财富网-沪深板块-行业板块-历史行情
https://quote.eastmoney.com/bk/90.BK1027.html
:param symbol: 板块名称
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param period: 周期; choice of {"日k", "周k", "月k"}
:type period: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: pandas.DataFrame
"""
period_map = {
"日k": '101',
"周k": '102',
"月k": '103',
}
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://7.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_map[period],
"fqt": adjust_map[adjust],
"beg": start_date,
"end": end_date,
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_industry_em.py#L109-L199
project: 25
executed_lines: [0] | executed_lines_pc: 1.098901
missing_lines: [23, 28, 29, 32, 33, 34, 47, 48, 49, 52, 65, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90] | missing_lines_pc: 24.175824
covered: false | filecoverage: 7.920792 | function_lines: 91 | mccabe: 2 | coverage: 75.824176 | docstring_lines: 14
function_nodoc:
def stock_board_industry_hist_em(
symbol: str = "小金属",
start_date: str = "20211201",
end_date: str = "20220401",
period: str = "日k",
adjust: str = "",
) -> pd.DataFrame:
period_map = {
"日k": '101',
"周k": '102',
"月k": '103',
}
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://7.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_map[period],
"fqt": adjust_map[adjust],
"beg": start_date,
"end": end_date,
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
id: 18849
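The two dicts above translate the human-readable arguments into the endpoint's klt/fqt query parameters ("日k"/"周k"/"月k" map to 101/102/103; ""/"qfq"/"hfq" map to 0/1/2). A usage sketch with the defaults spelled out:

```python
import akshare as ak

hist_df = ak.stock_board_industry_hist_em(
    symbol="小金属",
    start_date="20211201",
    end_date="20220401",
    period="日k",  # sent as klt=101
    adjust="",     # sent as fqt=0
)
print(hist_df.head())
```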
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_board_industry_em.py
identifier: stock_board_industry_hist_min_em
parameters: (symbol: str = "小金属", period: str = "5")
return_statement: return temp_df
docstring:
    Eastmoney - SH/SZ boards - industry boards - intraday historical quotes
    http://quote.eastmoney.com/bk/90.BK1027.html
    :param symbol: board name
    :type symbol: str
    :param period: choice of {"1", "5", "15", "30", "60"}
    :type period: str
    :return: intraday historical quotes
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 202 | func_end: 276
function:
def stock_board_industry_hist_min_em(
symbol: str = "小金属", period: str = "5"
) -> pd.DataFrame:
"""
东方财富网-沪深板块-行业板块-分时历史行情
http://quote.eastmoney.com/bk/90.BK1027.html
:param symbol: 板块名称
:type symbol: str
:param period: choice of {"1", "5", "15", "30", "60"}
:type period: str
:return: 分时历史行情
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://7.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_industry_em.py#L202-L276
project: 25
executed_lines: [0] | executed_lines_pc: 1.333333
missing_lines: [13, 14, 17, 18, 31, 32, 33, 36, 49, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74] | missing_lines_pc: 26.666667
covered: false | filecoverage: 7.920792 | function_lines: 75 | mccabe: 2 | coverage: 73.333333 | docstring_lines: 8
function_nodoc:
def stock_board_industry_hist_min_em(
symbol: str = "小金属", period: str = "5"
) -> pd.DataFrame:
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://7.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period,
"fqt": "1",
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"], errors="coerce")
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
id: 18850
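The endpoint delivers each bar as a single comma-joined string; the DataFrame is built by splitting those strings, as in this offline sketch with one synthetic 5-minute bar:

```python
import pandas as pd

# One synthetic bar in the comma-joined klines format.
klines = ["2022-04-01 10:00,10.0,10.2,10.3,9.9,1000,10200.0,4.0,2.0,0.2,0.5"]

temp_df = pd.DataFrame([item.split(",") for item in klines])
temp_df.columns = ["日期时间", "开盘", "收盘", "最高", "最低", "成交量",
                   "成交额", "振幅", "涨跌幅", "涨跌额", "换手率"]
print(temp_df)
```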
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_board_industry_em.py
identifier: stock_board_industry_cons_em
parameters: (symbol: str = "小金属")
return_statement: return temp_df
docstring:
    Eastmoney - SH/SZ boards - industry boards - board constituents
    https://data.eastmoney.com/bkzj/BK1027.html
    :param symbol: board name
    :type symbol: str
    :return: board constituents
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 279 | func_end: 379
function:
def stock_board_industry_cons_em(symbol: str = "小金属") -> pd.DataFrame:
"""
东方财富网-沪深板块-行业板块-板块成份
https://data.eastmoney.com/bkzj/BK1027.html
:param symbol: 板块名称
:type symbol: str
:return: 板块成份
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://29.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:{stock_board_code} f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152,f45",
"_": "1626081702127",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"_",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_board_industry_em.py#L279-L379
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8] | executed_lines_pc: 8.910891
missing_lines: [9, 10, 13, 14, 27, 28, 29, 30, 31, 32, 67, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100] | missing_lines_pc: 24.752475
covered: false | filecoverage: 7.920792 | function_lines: 101 | mccabe: 1 | coverage: 75.247525 | docstring_lines: 6
function_nodoc:
def stock_board_industry_cons_em(symbol: str = "小金属") -> pd.DataFrame:
stock_board_concept_em_map = stock_board_industry_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://29.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:{stock_board_code} f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152,f45",
"_": "1626081702127",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"_",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
id: 18851
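A usage sketch, assuming akshare is installed; note the function calls stock_board_industry_name_em internally to resolve the board name, so each call costs an extra request:

```python
import akshare as ak

cons_df = ak.stock_board_industry_cons_em(symbol="小金属")
print(cons_df[["代码", "名称", "最新价", "市盈率-动态"]].head())
```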
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_repurchase_em.py
identifier: stock_repurchase_em
parameters: ()
return_statement: return big_df
docstring:
    Eastmoney - Data Center - share repurchases - repurchase data
    https://data.eastmoney.com/gphg/hglist.html
    :return: share repurchase data
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 13 | func_end: 116
function:
def stock_repurchase_em() -> pd.DataFrame:
"""
东方财富网-数据中心-股票回购-股票回购数据
https://data.eastmoney.com/gphg/hglist.html
:return: 股票回购数据
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "UPD,DIM_DATE,DIM_SCODE",
"sortTypes": "-1,-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPTA_WEB_GETHGLIST_NEW",
"columns": "ALL",
"source": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
{
"DIM_SCODE": "股票代码",
"SECURITYSHORTNAME": "股票简称",
"NEWPRICE": "最新价",
"REPURPRICECAP": "计划回购价格区间",
"REPURNUMLOWER": "计划回购数量区间-下限",
"REPURNUMCAP": "计划回购数量区间-上限",
"ZSZXX": "占公告前一日总股本比例-下限",
"ZSZSX": "占公告前一日总股本比例-上限",
"JEXX": "计划回购金额区间-下限",
"JESX": "计划回购金额区间-上限",
"DIM_TRADEDATE": "回购起始时间",
"REPURPROGRESS": "实施进度",
"REPURPRICELOWER1": "已回购股份价格区间-下限",
"REPURPRICECAP1": "已回购股份价格区间-上限",
"REPURNUM": "已回购股份数量",
"REPURAMOUNT": "已回购金额",
"UPDATEDATE": "最新公告日期",
},
axis="columns",
inplace=True,
)
big_df = big_df[
[
"股票代码",
"股票简称",
"最新价",
"计划回购价格区间",
"计划回购数量区间-下限",
"计划回购数量区间-上限",
"占公告前一日总股本比例-下限",
"占公告前一日总股本比例-上限",
"计划回购金额区间-下限",
"计划回购金额区间-上限",
"回购起始时间",
"实施进度",
"已回购股份价格区间-下限",
"已回购股份价格区间-上限",
"已回购股份数量",
"已回购金额",
"最新公告日期",
]
]
big_df.reset_index(inplace=True)
big_df.rename(
{
"index": "序号",
},
axis="columns",
inplace=True,
)
big_df["序号"] = big_df.index + 1
process_map = {
"001": "董事会预案",
"002": "股东大会通过",
"003": "股东大会否决",
"004": "实施中",
"005": "停止实施",
"006": "完成实施",
}
big_df["实施进度"] = big_df["实施进度"].map(process_map)
big_df["回购起始时间"] = pd.to_datetime(big_df["回购起始时间"]).dt.date
big_df["最新公告日期"] = pd.to_datetime(big_df["最新公告日期"]).dt.date
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["计划回购价格区间"] = pd.to_numeric(big_df["计划回购价格区间"])
big_df["计划回购数量区间-下限"] = pd.to_numeric(big_df["计划回购数量区间-下限"])
big_df["计划回购数量区间-上限"] = pd.to_numeric(big_df["计划回购数量区间-上限"])
big_df["占公告前一日总股本比例-上限"] = pd.to_numeric(big_df["占公告前一日总股本比例-上限"])
big_df["占公告前一日总股本比例-下限"] = pd.to_numeric(big_df["占公告前一日总股本比例-下限"])
big_df["计划回购金额区间-上限"] = pd.to_numeric(big_df["计划回购金额区间-上限"])
big_df["计划回购金额区间-下限"] = pd.to_numeric(big_df["计划回购金额区间-下限"])
big_df["已回购股份价格区间-下限"] = pd.to_numeric(big_df["已回购股份价格区间-下限"])
big_df["已回购股份价格区间-上限"] = pd.to_numeric(big_df["已回购股份价格区间-上限"])
big_df["已回购股份数量"] = pd.to_numeric(big_df["已回购股份数量"])
big_df["已回购金额"] = pd.to_numeric(big_df["已回购金额"])
return big_df
url: https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_repurchase_em.py#L13-L116
project: 25
executed_lines: [0, 1, 2, 3, 4, 5, 6] | executed_lines_pc: 6.730769
missing_lines: [7, 8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 50, 71, 72, 79, 80, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103] | missing_lines_pc: 32.692308
covered: false | filecoverage: 14.285714 | function_lines: 104 | mccabe: 2 | coverage: 67.307692 | docstring_lines: 4
function_nodoc:
def stock_repurchase_em() -> pd.DataFrame:
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "UPD,DIM_DATE,DIM_SCODE",
"sortTypes": "-1,-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPTA_WEB_GETHGLIST_NEW",
"columns": "ALL",
"source": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.rename(
{
"DIM_SCODE": "股票代码",
"SECURITYSHORTNAME": "股票简称",
"NEWPRICE": "最新价",
"REPURPRICECAP": "计划回购价格区间",
"REPURNUMLOWER": "计划回购数量区间-下限",
"REPURNUMCAP": "计划回购数量区间-上限",
"ZSZXX": "占公告前一日总股本比例-下限",
"ZSZSX": "占公告前一日总股本比例-上限",
"JEXX": "计划回购金额区间-下限",
"JESX": "计划回购金额区间-上限",
"DIM_TRADEDATE": "回购起始时间",
"REPURPROGRESS": "实施进度",
"REPURPRICELOWER1": "已回购股份价格区间-下限",
"REPURPRICECAP1": "已回购股份价格区间-上限",
"REPURNUM": "已回购股份数量",
"REPURAMOUNT": "已回购金额",
"UPDATEDATE": "最新公告日期",
},
axis="columns",
inplace=True,
)
big_df = big_df[
[
"股票代码",
"股票简称",
"最新价",
"计划回购价格区间",
"计划回购数量区间-下限",
"计划回购数量区间-上限",
"占公告前一日总股本比例-下限",
"占公告前一日总股本比例-上限",
"计划回购金额区间-下限",
"计划回购金额区间-上限",
"回购起始时间",
"实施进度",
"已回购股份价格区间-下限",
"已回购股份价格区间-上限",
"已回购股份数量",
"已回购金额",
"最新公告日期",
]
]
big_df.reset_index(inplace=True)
big_df.rename(
{
"index": "序号",
},
axis="columns",
inplace=True,
)
big_df["序号"] = big_df.index + 1
process_map = {
"001": "董事会预案",
"002": "股东大会通过",
"003": "股东大会否决",
"004": "实施中",
"005": "停止实施",
"006": "完成实施",
}
big_df["实施进度"] = big_df["实施进度"].map(process_map)
big_df["回购起始时间"] = pd.to_datetime(big_df["回购起始时间"]).dt.date
big_df["最新公告日期"] = pd.to_datetime(big_df["最新公告日期"]).dt.date
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["计划回购价格区间"] = pd.to_numeric(big_df["计划回购价格区间"])
big_df["计划回购数量区间-下限"] = pd.to_numeric(big_df["计划回购数量区间-下限"])
big_df["计划回购数量区间-上限"] = pd.to_numeric(big_df["计划回购数量区间-上限"])
big_df["占公告前一日总股本比例-上限"] = pd.to_numeric(big_df["占公告前一日总股本比例-上限"])
big_df["占公告前一日总股本比例-下限"] = pd.to_numeric(big_df["占公告前一日总股本比例-下限"])
big_df["计划回购金额区间-上限"] = pd.to_numeric(big_df["计划回购金额区间-上限"])
big_df["计划回购金额区间-下限"] = pd.to_numeric(big_df["计划回购金额区间-下限"])
big_df["已回购股份价格区间-下限"] = pd.to_numeric(big_df["已回购股份价格区间-下限"])
big_df["已回购股份价格区间-上限"] = pd.to_numeric(big_df["已回购股份价格区间-上限"])
big_df["已回购股份数量"] = pd.to_numeric(big_df["已回购股份数量"])
big_df["已回购金额"] = pd.to_numeric(big_df["已回购金额"])
return big_df
id: 18852
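A usage sketch, assuming akshare is installed. The first request above is used only to read the page count; the loop then fetches every 500-row page (page 1 included, so it is downloaded twice), and the raw 实施进度 codes are decoded through process_map:

```python
import akshare as ak

repurchase_df = ak.stock_repurchase_em()
print(repurchase_df[["股票代码", "股票简称", "实施进度", "已回购金额"]].head())
```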
nwo: akfamily/akshare
sha: 087025d8d6f799b30ca114013e82c1ad22dc9294
path: akshare/stock/stock_share_changes_cninfo.py
identifier: stock_share_change_cninfo
parameters: (symbol: str = "002594", start_date: str = "20091227", end_date: str = "20220713")
return_statement: return data_df
docstring:
    CNINFO - share capital and shareholders - company share-capital changes
    http://webapi.cninfo.com.cn/#/apiDoc
    Queries the p_stock2215 endpoint
    :param symbol: stock code
    :type symbol: str
    :param start_date: start of the change-date range
    :type start_date: str
    :param end_date: end of the change-date range
    :type end_date: str
    :return: company share-capital changes
    :rtype: pandas.DataFrame
docstring_summary: identical to docstring
func_begin: 46 | func_end: 146
function:
def stock_share_change_cninfo(
symbol: str = "002594",
start_date: str = "20091227",
end_date: str = "20220713",
) -> pd.DataFrame:
"""
巨潮资讯-股本股东-公司股本变动
http://webapi.cninfo.com.cn/#/apiDoc
查询 p_stock2215 接口
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始变动日期
:type start_date: str
:param end_date: 结束变动日期
:type end_date: str
:return: 公司股本变动
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2215"
params = {
"scode": symbol,
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"SECCODE": "证券代码",
"SECNAME": "证券简称",
"ORGNAME": "机构名称",
"DECLAREDATE": "公告日期",
"VARYDATE": "变动日期",
"F001V": "变动原因编码",
"F002V": "变动原因",
"F003N": "总股本",
"F004N": "未流通股份",
"F005N": "发起人股份",
"F006N": "国家持股",
"F007N": "国有法人持股",
"F008N": "境内法人持股",
"F009N": "境外法人持股",
"F010N": "自然人持股",
"F011N": "募集法人股",
"F012N": "内部职工股",
"F013N": "转配股",
"F014N": "其他流通受限股份",
"F015N": "优先股",
"F016N": "其他未流通股",
"F021N": "已流通股份",
"F022N": "人民币普通股",
"F023N": "境内上市外资股-B股",
"F024N": "境外上市外资股-H股",
"F025N": "高管股",
"F026N": "其他流通股",
"F028N": "流通受限股份",
"F017N": "配售法人股",
"F018N": "战略投资者持股",
"F019N": "证券投资基金持股",
"F020N": "一般法人持股",
"F029N": "国家持股-受限",
"F030N": "国有法人持股-受限",
"F031N": "其他内资持股-受限",
"F032N": "其中:境内法人持股",
"F033N": "其中:境内自然人持股",
"F034N": "外资持股-受限",
"F035N": "其中:境外法人持股",
"F036N": "其中:境外自然人持股",
"F037N": "其中:限售高管股",
"F038N": "其中:限售B股",
"F040N": "其中:限售H股",
"F027C": "最新记录标识",
"F049N": "其他",
"F050N": "控股股东、实际控制人",
}
ignore_cols = ["最新记录标识", "其他"]
temp_df.rename(columns=cols_map, inplace=True)
temp_df.fillna(np.nan, inplace=True)
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
data_df = temp_df[[c for c in temp_df.columns if c not in ignore_cols]]
return data_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_share_changes_cninfo.py#L46-L146
| 25 |
[0] | 0.990099 |
[18, 19, 24, 25, 26, 27, 28, 43, 44, 45, 46, 94, 95, 96, 97, 98, 99, 100] | 17.821782 | false | 31.034483 | 101 | 2 | 82.178218 | 11 |
def stock_share_change_cninfo(
symbol: str = "002594",
start_date: str = "20091227",
end_date: str = "20220713",
) -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/stock/p_stock2215"
params = {
"scode": symbol,
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
cols_map = {
"SECCODE": "证券代码",
"SECNAME": "证券简称",
"ORGNAME": "机构名称",
"DECLAREDATE": "公告日期",
"VARYDATE": "变动日期",
"F001V": "变动原因编码",
"F002V": "变动原因",
"F003N": "总股本",
"F004N": "未流通股份",
"F005N": "发起人股份",
"F006N": "国家持股",
"F007N": "国有法人持股",
"F008N": "境内法人持股",
"F009N": "境外法人持股",
"F010N": "自然人持股",
"F011N": "募集法人股",
"F012N": "内部职工股",
"F013N": "转配股",
"F014N": "其他流通受限股份",
"F015N": "优先股",
"F016N": "其他未流通股",
"F021N": "已流通股份",
"F022N": "人民币普通股",
"F023N": "境内上市外资股-B股",
"F024N": "境外上市外资股-H股",
"F025N": "高管股",
"F026N": "其他流通股",
"F028N": "流通受限股份",
"F017N": "配售法人股",
"F018N": "战略投资者持股",
"F019N": "证券投资基金持股",
"F020N": "一般法人持股",
"F029N": "国家持股-受限",
"F030N": "国有法人持股-受限",
"F031N": "其他内资持股-受限",
"F032N": "其中:境内法人持股",
"F033N": "其中:境内自然人持股",
"F034N": "外资持股-受限",
"F035N": "其中:境外法人持股",
"F036N": "其中:境外自然人持股",
"F037N": "其中:限售高管股",
"F038N": "其中:限售B股",
"F040N": "其中:限售H股",
"F027C": "最新记录标识",
"F049N": "其他",
"F050N": "控股股东、实际控制人",
}
ignore_cols = ["最新记录标识", "其他"]
temp_df.rename(columns=cols_map, inplace=True)
temp_df.fillna(np.nan, inplace=True)
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
data_df = temp_df[[c for c in temp_df.columns if c not in ignore_cols]]
return data_df
| 18,853 |
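A minimal usage sketch for stock_share_change_cninfo above (assuming the function is re-exported at package level, as akshare interfaces usually are; dates follow the YYYYMMDD convention from the signature):

import akshare as ak

# 巨潮资讯 share-capital changes for 002594 over the documented default window
change_df = ak.stock_share_change_cninfo(
    symbol="002594", start_date="20091227", end_date="20220713"
)
print(change_df[["证券代码", "变动日期", "总股本"]].head())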
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hold_control_cninfo.py
|
stock_hold_control_cninfo
|
(symbol: str = "全部") ->
|
return temp_df
|
巨潮资讯-数据中心-专题统计-股东股本-实际控制人持股变动
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"单独控制", "实际控制人", "一致行动人", "家族控制", "全部"}; 从 2010 开始
:type symbol: str
:return: 实际控制人持股变动
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-股东股本-实际控制人持股变动
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"单独控制", "实际控制人", "一致行动人", "家族控制", "全部"}; 从 2010 开始
:type symbol: str
:return: 实际控制人持股变动
:rtype: pandas.DataFrame
| 49 | 116 |
def stock_hold_control_cninfo(symbol: str = "全部") -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-股东股本-实际控制人持股变动
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"单独控制", "实际控制人", "一致行动人", "家族控制", "全部"}; 从 2010 开始
:type symbol: str
:return: 实际控制人持股变动
:rtype: pandas.DataFrame
"""
symbol_map = {
"单独控制": "069001",
"实际控制人": "069002",
"一致行动人": "069003",
"家族控制": "069004",
"全部": "",
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1033"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"ctype": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"控股比例",
"控股数量",
"证券简称",
"实际控制人名称",
"直接控制人名称",
"控制类型",
"证券代码",
"变动日期",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"变动日期",
"实际控制人名称",
"控股数量",
"控股比例",
"直接控制人名称",
"控制类型",
]
]
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
temp_df["控股数量"] = pd.to_numeric(temp_df["控股数量"])
temp_df["控股比例"] = pd.to_numeric(temp_df["控股比例"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hold_control_cninfo.py#L49-L116
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8] | 13.235294 |
[9, 16, 17, 18, 19, 20, 21, 36, 39, 40, 41, 42, 52, 64, 65, 66, 67] | 25 | false | 18.518519 | 68 | 1 | 75 | 6 |
def stock_hold_control_cninfo(symbol: str = "全部") -> pd.DataFrame:
symbol_map = {
"单独控制": "069001",
"实际控制人": "069002",
"一致行动人": "069003",
"家族控制": "069004",
"全部": "",
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1033"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"ctype": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"控股比例",
"控股数量",
"证券简称",
"实际控制人名称",
"直接控制人名称",
"控制类型",
"证券代码",
"变动日期",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"变动日期",
"实际控制人名称",
"控股数量",
"控股比例",
"直接控制人名称",
"控制类型",
]
]
temp_df["变动日期"] = pd.to_datetime(temp_df["变动日期"]).dt.date
temp_df["控股数量"] = pd.to_numeric(temp_df["控股数量"])
temp_df["控股比例"] = pd.to_numeric(temp_df["控股比例"])
return temp_df
| 18,854 |
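A usage sketch for the controller-holdings interface (same package-level export assumption; symbol must be one of the five documented choices):

import akshare as ak

hold_df = ak.stock_hold_control_cninfo(symbol="实际控制人")
print(hold_df.head())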
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_hold_control_cninfo.py
|
stock_hold_management_detail_cninfo
|
(symbol: str = "增持") ->
|
return temp_df
|
巨潮资讯-数据中心-专题统计-股东股本-高管持股变动明细
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"增持", "减持"}
:type symbol: str
:return: 高管持股变动明细
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-股东股本-高管持股变动明细
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"增持", "减持"}
:type symbol: str
:return: 高管持股变动明细
:rtype: pandas.DataFrame
| 119 | 207 |
def stock_hold_management_detail_cninfo(symbol: str = "增持") -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-股东股本-高管持股变动明细
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"增持", "减持"}
:type symbol: str
:return: 高管持股变动明细
:rtype: pandas.DataFrame
"""
symbol_map = {
"增持": "B",
"减持": "S",
}
current_date = datetime.datetime.now().date().isoformat()
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1030"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": str(int(current_date[:4]) - 1) + current_date[4:],
"edate": current_date,
"varytype": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"公告日期",
"高管姓名",
"期末市值",
"成交均价",
"证券代码",
"变动比例",
"变动数量",
"截止日期",
"期末持股数量",
"期初持股数量",
"变动人与董监高关系",
"董监高职务",
"董监高姓名",
"数据来源",
"持股变动原因",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"截止日期",
"公告日期",
"高管姓名",
"董监高姓名",
"董监高职务",
"变动人与董监高关系",
"期初持股数量",
"期末持股数量",
"变动数量",
"变动比例",
"成交均价",
"期末市值",
"持股变动原因",
"数据来源",
]
]
temp_df["截止日期"] = pd.to_datetime(temp_df["截止日期"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["期初持股数量"] = pd.to_numeric(temp_df["期初持股数量"], errors="coerce")
temp_df["期末持股数量"] = pd.to_numeric(temp_df["期末持股数量"], errors="coerce")
temp_df["变动数量"] = pd.to_numeric(temp_df["变动数量"], errors="coerce")
temp_df["变动比例"] = pd.to_numeric(temp_df["变动比例"], errors="coerce")
temp_df["成交均价"] = pd.to_numeric(temp_df["成交均价"], errors="coerce")
temp_df["期末市值"] = pd.to_numeric(temp_df["期末市值"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_hold_control_cninfo.py#L119-L207
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8] | 10.11236 |
[9, 13, 14, 15, 16, 17, 18, 19, 34, 39, 40, 41, 42, 60, 80, 81, 82, 83, 84, 85, 86, 87, 88] | 25.842697 | false | 18.518519 | 89 | 1 | 74.157303 | 6 |
def stock_hold_management_detail_cninfo(symbol: str = "增持") -> pd.DataFrame:
symbol_map = {
"增持": "B",
"减持": "S",
}
current_date = datetime.datetime.now().date().isoformat()
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1030"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": str(int(current_date[:4]) - 1) + current_date[4:],
"edate": current_date,
"varytype": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"公告日期",
"高管姓名",
"期末市值",
"成交均价",
"证券代码",
"变动比例",
"变动数量",
"截止日期",
"期末持股数量",
"期初持股数量",
"变动人与董监高关系",
"董监高职务",
"董监高姓名",
"数据来源",
"持股变动原因",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"截止日期",
"公告日期",
"高管姓名",
"董监高姓名",
"董监高职务",
"变动人与董监高关系",
"期初持股数量",
"期末持股数量",
"变动数量",
"变动比例",
"成交均价",
"期末市值",
"持股变动原因",
"数据来源",
]
]
temp_df["截止日期"] = pd.to_datetime(temp_df["截止日期"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["期初持股数量"] = pd.to_numeric(temp_df["期初持股数量"], errors="coerce")
temp_df["期末持股数量"] = pd.to_numeric(temp_df["期末持股数量"], errors="coerce")
temp_df["变动数量"] = pd.to_numeric(temp_df["变动数量"], errors="coerce")
temp_df["变动比例"] = pd.to_numeric(temp_df["变动比例"], errors="coerce")
temp_df["成交均价"] = pd.to_numeric(temp_df["成交均价"], errors="coerce")
temp_df["期末市值"] = pd.to_numeric(temp_df["期末市值"], errors="coerce")
return temp_df
| 18,855 |
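A usage sketch for the management-holdings detail interface (export assumption as above). Note that the function itself fixes the query window to the trailing year, since sdate is derived by subtracting 1 from the current year:

import akshare as ak

detail_df = ak.stock_hold_management_detail_cninfo(symbol="增持")
print(detail_df[["证券代码", "变动数量", "成交均价"]].head())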
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_us_famous.py
|
stock_us_famous_spot_em
|
(symbol: str = "科技类") -> pd
|
return temp_df
|
东方财富网-行情中心-美股市场-知名美股
http://quote.eastmoney.com/center/gridlist.html#us_wellknown
:param symbol: choice of {'科技类', '金融类', '医药食品类', '媒体类', '汽车能源类', '制造零售类'}
:type symbol: str
:return: 知名美股实时行情
:rtype: pandas.DataFrame
|
东方财富网-行情中心-美股市场-知名美股
http://quote.eastmoney.com/center/gridlist.html#us_wellknown
:param symbol: choice of {'科技类', '金融类', '医药食品类', '媒体类', '汽车能源类', '制造零售类'}
:type symbol: str
:return: 知名美股实时行情
:rtype: pandas.DataFrame
| 12 | 110 |
def stock_us_famous_spot_em(symbol: str = "科技类") -> pd.DataFrame:
"""
东方财富网-行情中心-美股市场-知名美股
http://quote.eastmoney.com/center/gridlist.html#us_wellknown
    :param symbol: choice of {'科技类', '金融类', '医药食品类', '媒体类', '汽车能源类', '制造零售类'}
    :type symbol: str
:return: 知名美股实时行情
:rtype: pandas.DataFrame
"""
market_map = {
"科技类": "0216",
"金融类": "0217",
"医药食品类": "0218",
"媒体类": "0220",
"汽车能源类": "0219",
"制造零售类": "0221",
}
url = "http://69.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:MK{market_map[symbol]}",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1631271634231",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_us_famous.py#L12-L110
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8] | 9.090909 |
[9, 17, 18, 31, 32, 33, 34, 69, 70, 71, 72, 73, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98] | 22.222222 | false | 16.666667 | 99 | 1 | 77.777778 | 6 |
def stock_us_famous_spot_em(symbol: str = "科技类") -> pd.DataFrame:
market_map = {
"科技类": "0216",
"金融类": "0217",
"医药食品类": "0218",
"媒体类": "0220",
"汽车能源类": "0219",
"制造零售类": "0221",
}
url = "http://69.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:MK{market_map[symbol]}",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1631271634231",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
return temp_df
| 18,856 |
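A usage sketch for the well-known-US-stocks interface (export assumption as above; symbol selects one of the six sector buckets in market_map):

import akshare as ak

us_df = ak.stock_us_famous_spot_em(symbol="科技类")
print(us_df[["名称", "最新价", "涨跌幅", "代码"]].head())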
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_weibo_nlp.py
|
stock_js_weibo_nlp_time
|
()
|
return r.json()["data"]["timescale"]
|
https://datacenter.jin10.com/market
:return: 特定时间表示的字典
:rtype: dict
|
https://datacenter.jin10.com/market
:return: 特定时间表示的字典
:rtype: dict
| 19 | 45 |
def stock_js_weibo_nlp_time() -> Dict:
"""
https://datacenter.jin10.com/market
:return: 特定时间表示的字典
:rtype: dict
"""
url = "https://datacenter-api.jin10.com/weibo/config"
payload = {"_": int(time.time() * 1000)}
headers = {
"authority": "datacenter-api.jin10.com",
"pragma": "no-cache",
"cache-control": "no-cache",
"accept": "*/*",
"x-app-id": "rU6QIu7JHe2gOUeR",
"sec-fetch-dest": "empty",
"x-csrf-token": "",
"x-version": "1.0.0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36",
"origin": "https://datacenter.jin10.com",
"sec-fetch-site": "same-site",
"sec-fetch-mode": "cors",
"referer": "https://datacenter.jin10.com/market",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
}
    # the cache-busting timestamp belongs in the query string, not in a GET body
    r = requests.get(url, headers=headers, params=payload)
return r.json()["data"]["timescale"]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_weibo_nlp.py#L19-L45
| 25 |
[0, 1, 2, 3, 4, 5] | 22.222222 |
[6, 7, 8, 25, 26] | 18.518519 | false | 33.333333 | 27 | 1 | 81.481481 | 3 |
def stock_js_weibo_nlp_time() -> Dict:
url = "https://datacenter-api.jin10.com/weibo/config"
payload = {"_": int(time.time() * 1000)}
headers = {
"authority": "datacenter-api.jin10.com",
"pragma": "no-cache",
"cache-control": "no-cache",
"accept": "*/*",
"x-app-id": "rU6QIu7JHe2gOUeR",
"sec-fetch-dest": "empty",
"x-csrf-token": "",
"x-version": "1.0.0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36",
"origin": "https://datacenter.jin10.com",
"sec-fetch-site": "same-site",
"sec-fetch-mode": "cors",
"referer": "https://datacenter.jin10.com/market",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
}
    # the cache-busting timestamp belongs in the query string, not in a GET body
    r = requests.get(url, headers=headers, params=payload)
return r.json()["data"]["timescale"]
| 18,857 |
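A usage sketch for the timescale-config interface (export assumption as above; its keys feed the report interface in the next record):

import akshare as ak

timescale_map = ak.stock_js_weibo_nlp_time()
print(timescale_map)  # keys such as 'CNHOUR2' ... 'CNDAY30'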
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_weibo_nlp.py
|
stock_js_weibo_report
|
(time_period: str = "CNHOUR12")
|
return temp_df
|
金十数据中心-实时监控-微博舆情报告
https://datacenter.jin10.com/market
:param time_period: {'CNHOUR2': '2小时', 'CNHOUR6': '6小时', 'CNHOUR12': '12小时', 'CNHOUR24': '1天', 'CNDAY7': '1周', 'CNDAY30': '1月'}
:type time_period: str
:return: 指定时间段的微博舆情报告
:rtype: pandas.DataFrame
|
金十数据中心-实时监控-微博舆情报告
https://datacenter.jin10.com/market
:param time_period: {'CNHOUR2': '2小时', 'CNHOUR6': '6小时', 'CNHOUR12': '12小时', 'CNHOUR24': '1天', 'CNDAY7': '1周', 'CNDAY30': '1月'}
:type time_period: str
:return: 指定时间段的微博舆情报告
:rtype: pandas.DataFrame
| 48 | 82 |
def stock_js_weibo_report(time_period: str = "CNHOUR12") -> pd.DataFrame:
"""
金十数据中心-实时监控-微博舆情报告
https://datacenter.jin10.com/market
:param time_period: {'CNHOUR2': '2小时', 'CNHOUR6': '6小时', 'CNHOUR12': '12小时', 'CNHOUR24': '1天', 'CNDAY7': '1周', 'CNDAY30': '1月'}
:type time_period: str
:return: 指定时间段的微博舆情报告
:rtype: pandas.DataFrame
"""
url = "https://datacenter-api.jin10.com/weibo/list"
payload = {
"timescale": time_period,
"_": int(time.time() * 1000)
}
headers = {
'authority': 'datacenter-api.jin10.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': '*/*',
'x-app-id': 'rU6QIu7JHe2gOUeR',
'sec-fetch-dest': 'empty',
'x-csrf-token': '',
'x-version': '1.0.0',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36',
'origin': 'https://datacenter.jin10.com',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'referer': 'https://datacenter.jin10.com/market',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8'
}
r = requests.get(url, params=payload, headers=headers)
temp_df = pd.DataFrame(r.json()["data"])
temp_df['rate'] = pd.to_numeric(temp_df['rate'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_weibo_nlp.py#L48-L82
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8] | 25.714286 |
[9, 10, 14, 31, 32, 33, 34] | 20 | false | 33.333333 | 35 | 1 | 80 | 6 |
def stock_js_weibo_report(time_period: str = "CNHOUR12") -> pd.DataFrame:
url = "https://datacenter-api.jin10.com/weibo/list"
payload = {
"timescale": time_period,
"_": int(time.time() * 1000)
}
headers = {
'authority': 'datacenter-api.jin10.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': '*/*',
'x-app-id': 'rU6QIu7JHe2gOUeR',
'sec-fetch-dest': 'empty',
'x-csrf-token': '',
'x-version': '1.0.0',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36',
'origin': 'https://datacenter.jin10.com',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'referer': 'https://datacenter.jin10.com/market',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8'
}
r = requests.get(url, params=payload, headers=headers)
temp_df = pd.DataFrame(r.json()["data"])
temp_df['rate'] = pd.to_numeric(temp_df['rate'])
return temp_df
| 18,858 |
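A sketch combining the two jin10 interfaces (assuming the config call returns the period-code mapping shown in the docstring, so iterating it yields valid time_period values):

import akshare as ak

for period in ak.stock_js_weibo_nlp_time():
    report_df = ak.stock_js_weibo_report(time_period=period)
    print(period, report_df["rate"].mean())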
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_cg_lawsuit.py
|
stock_cg_lawsuit_cninfo
|
(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
)
|
return temp_df
|
巨潮资讯-数据中心-专题统计-公司治理-公司诉讼
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
:return: 公司诉讼
:rtype: pandas.DataFrame
|
巨潮资讯-数据中心-专题统计-公司治理-公司诉讼
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
:return: 公司诉讼
:rtype: pandas.DataFrame
| 45 | 113 |
def stock_cg_lawsuit_cninfo(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-公司治理-公司诉讼
http://webapi.cninfo.com.cn/#/thematicStatistics
:param symbol: choice of {"全部", "深市主板", "沪市", "创业板", "科创板"}
:type symbol: str
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 结束统计时间
:type end_date: str
    :return: 公司诉讼
:rtype: pandas.DataFrame
"""
symbol_map = {
"全部": '',
"深市主板": '012002',
"沪市": '012001',
"创业板": '012015',
"科创板": '012029',
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1055"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"market": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公告统计区间",
"诉讼金额",
"诉讼次数",
"证券简称",
"证券代码",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"公告统计区间",
"诉讼次数",
"诉讼金额",
]
]
temp_df["诉讼次数"] = pd.to_numeric(temp_df["诉讼次数"])
temp_df["诉讼金额"] = pd.to_numeric(temp_df["诉讼金额"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_cg_lawsuit.py#L45-L113
| 25 |
[0] | 1.449275 |
[15, 22, 23, 24, 25, 26, 27, 42, 47, 48, 49, 50, 57, 66, 67, 68] | 23.188406 | false | 30.769231 | 69 | 1 | 76.811594 | 10 |
def stock_cg_lawsuit_cninfo(
symbol: str = "全部", start_date: str = "20180630", end_date: str = "20210927"
) -> pd.DataFrame:
symbol_map = {
"全部": '',
"深市主板": '012002',
"沪市": '012001',
"创业板": '012015',
"科创板": '012029',
}
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1055"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
"market": symbol_map[symbol],
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"公告统计区间",
"诉讼金额",
"诉讼次数",
"证券简称",
"证券代码",
]
temp_df = temp_df[
[
"证券代码",
"证券简称",
"公告统计区间",
"诉讼次数",
"诉讼金额",
]
]
temp_df["诉讼次数"] = pd.to_numeric(temp_df["诉讼次数"])
temp_df["诉讼金额"] = pd.to_numeric(temp_df["诉讼金额"])
return temp_df
| 18,859 |
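A usage sketch for the lawsuit-statistics interface (export assumption as above; dates are YYYYMMDD strings as in the defaults):

import akshare as ak

lawsuit_df = ak.stock_cg_lawsuit_cninfo(
    symbol="全部", start_date="20180630", end_date="20210927"
)
print(lawsuit_df.sort_values("诉讼金额", ascending=False).head())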
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_b_sina.py
|
_get_zh_b_page_count
|
()
|
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_b
:return: 需要采集的股票总页数
:rtype: int
|
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_b
:return: 需要采集的股票总页数
:rtype: int
| 27 | 40 |
def _get_zh_b_page_count() -> int:
"""
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_b
:return: 需要采集的股票总页数
:rtype: int
"""
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount?node=hs_b"
r = requests.get(url)
    total_count = int(re.findall(re.compile(r"\d+"), r.text)[0])
    # 80 records per page; true division always yields a float, so the old
    # isinstance(page_count, int) branch could never be taken
    page_count, remainder = divmod(total_count, 80)
    return page_count if remainder == 0 else page_count + 1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_b_sina.py#L27-L40
| 25 |
[0, 1, 2, 3, 4, 5, 6] | 50 |
[7, 8, 9, 10, 11, 13] | 42.857143 | false | 6.363636 | 14 | 2 | 57.142857 | 4 |
def _get_zh_b_page_count() -> int:
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount?node=hs_b"
r = requests.get(url)
    total_count = int(re.findall(re.compile(r"\d+"), r.text)[0])
    # 80 records per page; true division always yields a float, so the old
    # isinstance(page_count, int) branch could never be taken
    page_count, remainder = divmod(total_count, 80)
    return page_count if remainder == 0 else page_count + 1
| 18,860 |
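A quick check of the pages-needed arithmetic in _get_zh_b_page_count (80 records per page; an extra page is only needed when the count does not divide evenly):

count_exact, count_partial = 160, 161
assert divmod(count_exact, 80) == (2, 0)    # exactly 2 pages
assert divmod(count_partial, 80) == (2, 1)  # remainder of 1 -> 3 pages needed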
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_b_sina.py
|
stock_zh_b_spot
|
()
|
return big_df
|
新浪财经-所有 B 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
|
新浪财经-所有 B 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
| 43 | 134 |
def stock_zh_b_spot() -> pd.DataFrame:
"""
新浪财经-所有 B 股的实时行情数据; 重复运行本函数会被新浪暂时封 IP
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: 所有股票的实时行情数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_b_page_count()
zh_sina_stock_payload_copy = {
'page': '1',
'num': '80',
'sort': 'symbol',
'asc': '1',
'node': 'hs_b',
'symbol': '',
'_s_r_a': 'page',
}
for page in tqdm(range(1, page_count + 1), leave=False, desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(zh_sina_a_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
        # DataFrame.append was removed in pandas 2.0; concat matches the rest of the codebase
        big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df = big_df.astype(
{
"trade": "float",
"pricechange": "float",
"changepercent": "float",
"buy": "float",
"sell": "float",
"settlement": "float",
"open": "float",
"high": "float",
"low": "float",
"volume": "float",
"amount": "float",
"per": "float",
"pb": "float",
"mktcap": "float",
"nmc": "float",
"turnoverratio": "float",
}
)
big_df.columns = [
'代码',
'_',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'_',
'_',
'_',
'_',
'_',
'_',
]
big_df = big_df[[
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
]]
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['涨跌额'] = pd.to_numeric(big_df['涨跌额'])
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'])
big_df['买入'] = pd.to_numeric(big_df['买入'])
big_df['卖出'] = pd.to_numeric(big_df['卖出'])
big_df['昨收'] = pd.to_numeric(big_df['昨收'])
big_df['今开'] = pd.to_numeric(big_df['今开'])
big_df['最高'] = pd.to_numeric(big_df['最高'])
big_df['最低'] = pd.to_numeric(big_df['最低'])
big_df['成交量'] = pd.to_numeric(big_df['成交量'])
big_df['成交额'] = pd.to_numeric(big_df['成交额'])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_b_sina.py#L43-L134
| 25 |
[0, 1, 2, 3, 4, 5, 6] | 7.608696 |
[7, 8, 9, 18, 19, 20, 21, 22, 23, 43, 65, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91] | 25 | false | 6.363636 | 92 | 2 | 75 | 4 |
def stock_zh_b_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = _get_zh_b_page_count()
zh_sina_stock_payload_copy = {
'page': '1',
'num': '80',
'sort': 'symbol',
'asc': '1',
'node': 'hs_b',
'symbol': '',
'_s_r_a': 'page',
}
for page in tqdm(range(1, page_count + 1), leave=False, desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(zh_sina_a_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
        # DataFrame.append was removed in pandas 2.0; concat matches the rest of the codebase
        big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
big_df = big_df.astype(
{
"trade": "float",
"pricechange": "float",
"changepercent": "float",
"buy": "float",
"sell": "float",
"settlement": "float",
"open": "float",
"high": "float",
"low": "float",
"volume": "float",
"amount": "float",
"per": "float",
"pb": "float",
"mktcap": "float",
"nmc": "float",
"turnoverratio": "float",
}
)
big_df.columns = [
'代码',
'_',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'_',
'_',
'_',
'_',
'_',
'_',
]
big_df = big_df[[
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
]]
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['涨跌额'] = pd.to_numeric(big_df['涨跌额'])
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'])
big_df['买入'] = pd.to_numeric(big_df['买入'])
big_df['卖出'] = pd.to_numeric(big_df['卖出'])
big_df['昨收'] = pd.to_numeric(big_df['昨收'])
big_df['今开'] = pd.to_numeric(big_df['今开'])
big_df['最高'] = pd.to_numeric(big_df['最高'])
big_df['最低'] = pd.to_numeric(big_df['最低'])
big_df['成交量'] = pd.to_numeric(big_df['成交量'])
big_df['成交额'] = pd.to_numeric(big_df['成交额'])
return big_df
| 18,861 |
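A usage sketch for the spot interface (export assumption as above; note the docstring's warning that repeated calls may get the IP temporarily banned by Sina):

import akshare as ak

spot_df = ak.stock_zh_b_spot()
print(spot_df[["代码", "名称", "最新价", "涨跌幅"]].head())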
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_b_sina.py
|
stock_zh_b_daily
|
(
symbol: str = "sh900901",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
)
|
新浪财经-B 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh900901
:type symbol: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
|
新浪财经-B 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
:param symbol: sh900901
:type symbol: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
| 137 | 286 |
def stock_zh_b_daily(
symbol: str = "sh900901",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
) -> pd.DataFrame:
"""
新浪财经-B 股-个股的历史行情数据, 大量抓取容易封 IP
https://finance.sina.com.cn/realstock/company/sh689009/nc.shtml
:param start_date: 20201103; 开始日期
:type start_date: str
:param end_date: 20201103; 结束日期
:type end_date: str
    :param symbol: sh900901
    :type symbol: str
    :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; qfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
def _fq_factor(method: str) -> pd.DataFrame:
if method == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if hfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
return hfq_factor_df
else:
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if qfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust in ("hfq-factor", "qfq-factor"):
return _fq_factor(adjust.split("-")[0])
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"outstanding_share",
"turnover",
]
if adjust == "":
temp_df = temp_df[start_date:end_date]
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_b_sina.py#L137-L286
| 25 |
[0] | 0.666667 |
[20, 21, 22, 23, 26, 27, 28, 29, 30, 31, 32, 34, 35, 38, 39, 40, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 67, 68, 69, 70, 71, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 97, 98, 99, 100, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 121, 122, 123, 126, 127, 128, 130, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149] | 66.666667 | false | 6.363636 | 150 | 9 | 33.333333 | 12 |
def stock_zh_b_daily(
symbol: str = "sh900901",
start_date: str = "19900101",
end_date: str = "21000118",
adjust: str = "",
) -> pd.DataFrame:
def _fq_factor(method: str) -> pd.DataFrame:
if method == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if hfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
return hfq_factor_df
else:
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
if qfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
qfq_factor_df.reset_index(inplace=True)
return qfq_factor_df
if adjust in ("hfq-factor", "qfq-factor"):
return _fq_factor(adjust.split("-")[0])
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"]).dt.date
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(
data_df, amount_data_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = [
"open",
"high",
"low",
"close",
"volume",
"outstanding_share",
"turnover",
]
if adjust == "":
temp_df = temp_df[start_date:end_date]
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])["data"]
)
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=["open", "high", "low", "close", "volume"], inplace=True)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df = temp_df.iloc[:, :-1]
temp_df = temp_df[start_date:end_date]
temp_df["open"] = round(temp_df["open"], 2)
temp_df["high"] = round(temp_df["high"], 2)
temp_df["low"] = round(temp_df["low"], 2)
temp_df["close"] = round(temp_df["close"], 2)
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
| 18,862 |
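A usage sketch covering the three adjust modes documented above (export assumption as above; heavy repeated calls risk the Sina IP ban mentioned in the docstring):

import akshare as ak

raw_df = ak.stock_zh_b_daily(symbol="sh900901", start_date="20201101", end_date="20201201")
qfq_df = ak.stock_zh_b_daily(symbol="sh900901", adjust="qfq")            # forward adjusted
factor_df = ak.stock_zh_b_daily(symbol="sh900901", adjust="hfq-factor")  # factors only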
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock/stock_zh_b_sina.py
|
stock_zh_b_minute
|
(
symbol: str = "sh900901", period: str = "1", adjust: str = ""
)
|
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh900901/nc.shtml
:param symbol: sh900901
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
|
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh900901/nc.shtml
:param symbol: sh900901
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
| 289 | 358 |
def stock_zh_b_minute(
symbol: str = "sh900901", period: str = "1", adjust: str = ""
) -> pd.DataFrame:
"""
股票及股票指数历史行情数据-分钟数据
http://finance.sina.com.cn/realstock/company/sh900901/nc.shtml
:param symbol: sh900901
:type symbol: str
:param period: 1, 5, 15, 30, 60 分钟的数据
:type period: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据;
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "20000",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split("=(")[1].split(");")[0])).iloc[:, :6]
if temp_df.empty:
print(f"{symbol} 股票数据不存在,请检查是否已退市")
return None
try:
stock_zh_b_daily(symbol=symbol, adjust="qfq")
    except Exception:  # factor data unavailable; fall back to unadjusted bars
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[[True if "09:31:00" <= item <= "15:00:00" else False for item in temp_df["time"]]]
need_df.drop_duplicates(subset=['date'], keep='last', inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_b_daily_qfq_df = stock_zh_b_daily(symbol=symbol, adjust="qfq")
stock_zh_b_daily_qfq_df.index = pd.to_datetime(stock_zh_b_daily_qfq_df['date'])
result_df = stock_zh_b_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[[True if "09:31:00" <= item <= "15:00:00" else False for item in temp_df["time"]]]
need_df.drop_duplicates(subset=['date'], keep='last', inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_b_daily_hfq_df = stock_zh_b_daily(symbol=symbol, adjust="hfq")
stock_zh_b_daily_hfq_df.index = pd.to_datetime(stock_zh_b_daily_hfq_df['date'])
result_df = stock_zh_b_daily_hfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock/stock_zh_b_sina.py#L289-L358
| 25 |
[0] | 1.428571 |
[15, 16, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69] | 67.142857 | false | 6.363636 | 70 | 8 | 32.857143 | 10 |
def stock_zh_b_minute(
symbol: str = "sh900901", period: str = "1", adjust: str = ""
) -> pd.DataFrame:
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "20000",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split("=(")[1].split(");")[0])).iloc[:, :6]
if temp_df.empty:
print(f"{symbol} 股票数据不存在,请检查是否已退市")
return None
try:
stock_zh_b_daily(symbol=symbol, adjust="qfq")
    except Exception:  # factor data unavailable; fall back to unadjusted bars
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[[True if "09:31:00" <= item <= "15:00:00" else False for item in temp_df["time"]]]
need_df.drop_duplicates(subset=['date'], keep='last', inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_b_daily_qfq_df = stock_zh_b_daily(symbol=symbol, adjust="qfq")
stock_zh_b_daily_qfq_df.index = pd.to_datetime(stock_zh_b_daily_qfq_df['date'])
result_df = stock_zh_b_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
# 处理没有最后一分钟的情况
need_df = temp_df[[True if "09:31:00" <= item <= "15:00:00" else False for item in temp_df["time"]]]
need_df.drop_duplicates(subset=['date'], keep='last', inplace=True)
need_df.index = pd.to_datetime(need_df["date"])
stock_zh_b_daily_hfq_df = stock_zh_b_daily(symbol=symbol, adjust="hfq")
stock_zh_b_daily_hfq_df.index = pd.to_datetime(stock_zh_b_daily_hfq_df['date'])
result_df = stock_zh_b_daily_hfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
| 18,863 |
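A usage sketch for the minute-bar interface (export assumption as above; period is passed as a string per the signature):

import akshare as ak

min_df = ak.stock_zh_b_minute(symbol="sh900901", period="5", adjust="qfq")
print(min_df.tail())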
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_docstrings.py
|
DocstringComponents.__init__
|
(self, comp_dict, strip_whitespace=True)
|
Read entries from a dict, optionally stripping outer whitespace.
|
Read entries from a dict, optionally stripping outer whitespace.
| 10 | 23 |
def __init__(self, comp_dict, strip_whitespace=True):
"""Read entries from a dict, optionally stripping outer whitespace."""
if strip_whitespace:
entries = {}
for key, val in comp_dict.items():
m = re.match(self.regexp, val)
if m is None:
entries[key] = val
else:
entries[key] = m.group(1)
else:
entries = comp_dict.copy()
self.entries = entries
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_docstrings.py#L10-L23
| 26 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] | 100 |
[] | 0 | true | 87.5 | 14 | 4 | 100 | 1 |
def __init__(self, comp_dict, strip_whitespace=True):
if strip_whitespace:
entries = {}
for key, val in comp_dict.items():
m = re.match(self.regexp, val)
if m is None:
entries[key] = val
else:
entries[key] = m.group(1)
else:
entries = comp_dict.copy()
self.entries = entries
| 18,910 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_docstrings.py
|
DocstringComponents.__getattr__
|
(self, attr)
|
Provide dot access to entries for clean raw docstrings.
|
Provide dot access to entries for clean raw docstrings.
| 25 | 41 |
def __getattr__(self, attr):
"""Provide dot access to entries for clean raw docstrings."""
if attr in self.entries:
return self.entries[attr]
else:
try:
return self.__getattribute__(attr)
except AttributeError as err:
# If Python is run with -OO, it will strip docstrings and our lookup
# from self.entries will fail. We check for __debug__, which is actually
# set to False by -O (it is True for normal execution).
# But we only want to see an error when building the docs;
# not something users should see, so this slight inconsistency is fine.
if __debug__:
raise err
else:
pass
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_docstrings.py#L25-L41
| 26 |
[0, 1, 2, 3, 4] | 29.411765 |
[5, 6, 7, 13, 14] | 29.411765 | false | 87.5 | 17 | 4 | 70.588235 | 1 |
def __getattr__(self, attr):
if attr in self.entries:
return self.entries[attr]
else:
try:
return self.__getattribute__(attr)
except AttributeError as err:
# If Python is run with -OO, it will strip docstrings and our lookup
# from self.entries will fail. We check for __debug__, which is actually
# set to False by -O (it is True for normal execution).
# But we only want to see an error when building the docs;
# not something users should see, so this slight inconsistency is fine.
if __debug__:
raise err
else:
pass
| 18,911 |
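A minimal sketch of how the constructor and __getattr__ above work together (hypothetical entry name; assumes the class-level regexp referenced in __init__ strips one outer blank line on each side, which is what the strip_whitespace branch is for):

from seaborn._docstrings import DocstringComponents

params = DocstringComponents({
    "data": """
data : DataFrame
    Input data structure.
""",
})
# Dot access returns the stripped entry, ready to interpolate into a docstring
print(params.data)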
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_docstrings.py
|
DocstringComponents.from_nested_components
|
(cls, **kwargs)
|
return cls(kwargs, strip_whitespace=False)
|
Add multiple sub-sets of components.
|
Add multiple sub-sets of components.
| 44 | 46 |
def from_nested_components(cls, **kwargs):
"""Add multiple sub-sets of components."""
return cls(kwargs, strip_whitespace=False)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_docstrings.py#L44-L46
| 26 |
[0, 1, 2] | 100 |
[] | 0 | true | 87.5 | 3 | 1 | 100 | 1 |
def from_nested_components(cls, **kwargs):
return cls(kwargs, strip_whitespace=False)
| 18,912 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_docstrings.py
|
DocstringComponents.from_function_params
|
(cls, func)
|
return cls(comp_dict)
|
Use the numpydoc parser to extract components from existing func.
|
Use the numpydoc parser to extract components from existing func.
| 49 | 59 |
def from_function_params(cls, func):
"""Use the numpydoc parser to extract components from existing func."""
params = NumpyDocString(pydoc.getdoc(func))["Parameters"]
comp_dict = {}
for p in params:
name = p.name
type = p.type
desc = "\n ".join(p.desc)
comp_dict[name] = f"{name} : {type}\n {desc}"
return cls(comp_dict)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_docstrings.py#L49-L59
| 26 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] | 100 |
[] | 0 | true | 87.5 | 11 | 2 | 100 | 1 |
def from_function_params(cls, func):
params = NumpyDocString(pydoc.getdoc(func))["Parameters"]
comp_dict = {}
for p in params:
name = p.name
type = p.type
desc = "\n ".join(p.desc)
comp_dict[name] = f"{name} : {type}\n {desc}"
return cls(comp_dict)
| 18,913 |
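A sketch of from_function_params on a toy numpydoc-style function (requires the numpydoc package, which supplies NumpyDocString; the function name is made up for illustration):

from seaborn._docstrings import DocstringComponents

def plotter(x=None):
    """Do something.

    Parameters
    ----------
    x : vector
        Data to plot.
    """

params = DocstringComponents.from_function_params(plotter)
print(params.x)  # "x : vector\n    Data to plot."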
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
MarkerStyle
|
(marker=None, fillstyle=None)
|
return mpl.markers.MarkerStyle(marker, fillstyle)
|
Allow MarkerStyle to accept a MarkerStyle object as parameter.
Supports matplotlib < 3.3.0
https://github.com/matplotlib/matplotlib/pull/16692
|
Allow MarkerStyle to accept a MarkerStyle object as parameter.
| 6 | 19 |
def MarkerStyle(marker=None, fillstyle=None):
"""
Allow MarkerStyle to accept a MarkerStyle object as parameter.
Supports matplotlib < 3.3.0
https://github.com/matplotlib/matplotlib/pull/16692
"""
if isinstance(marker, mpl.markers.MarkerStyle):
if fillstyle is None:
return marker
else:
marker = marker.get_marker()
return mpl.markers.MarkerStyle(marker, fillstyle)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L6-L19
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13
] | 92.857143 |
[
12
] | 7.142857 | false | 26.470588 | 14 | 3 | 92.857143 | 4 |
def MarkerStyle(marker=None, fillstyle=None):
if isinstance(marker, mpl.markers.MarkerStyle):
if fillstyle is None:
return marker
else:
marker = marker.get_marker()
return mpl.markers.MarkerStyle(marker, fillstyle)
| 18,914 |
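A quick illustration of the wrapper's two paths, assuming it is importable from the module shown above (`seaborn._compat`):

import matplotlib as mpl
from seaborn._compat import MarkerStyle  # the wrapper defined above

m = mpl.markers.MarkerStyle("o")
print(MarkerStyle(m) is m)                     # True: passed through unchanged
print(MarkerStyle(m, "none").get_fillstyle())  # 'none': rebuilt with the new fillstyle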
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
norm_from_scale
|
(scale, norm)
|
return new_norm
|
Produce a Normalize object given a Scale and min/max domain limits.
|
Produce a Normalize object given a Scale and min/max domain limits.
| 22 | 67 |
def norm_from_scale(scale, norm):
"""Produce a Normalize object given a Scale and min/max domain limits."""
    # This is an internal matplotlib function that simplifies access.
# It is likely to become part of the matplotlib API at some point:
# https://github.com/matplotlib/matplotlib/issues/20329
if isinstance(norm, mpl.colors.Normalize):
return norm
if scale is None:
return None
if norm is None:
vmin = vmax = None
else:
vmin, vmax = norm # TODO more helpful error if this fails?
class ScaledNorm(mpl.colors.Normalize):
def __call__(self, value, clip=None):
# From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py
# See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE
value, is_scalar = self.process_value(value)
self.autoscale_None(value)
if self.vmin > self.vmax:
raise ValueError("vmin must be less or equal to vmax")
if self.vmin == self.vmax:
return np.full_like(value, 0)
if clip is None:
clip = self.clip
if clip:
value = np.clip(value, self.vmin, self.vmax)
# ***** Seaborn changes start ****
t_value = self.transform(value).reshape(np.shape(value))
t_vmin, t_vmax = self.transform([self.vmin, self.vmax])
# ***** Seaborn changes end *****
if not np.isfinite([t_vmin, t_vmax]).all():
raise ValueError("Invalid vmin or vmax")
t_value -= t_vmin
t_value /= (t_vmax - t_vmin)
t_value = np.ma.masked_invalid(t_value, copy=False)
return t_value[0] if is_scalar else t_value
new_norm = ScaledNorm(vmin, vmax)
new_norm.transform = scale.get_transform().transform
return new_norm
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L22-L67
| 26 |
[
0,
1,
2,
3,
4
] | 10.869565 |
[
5,
6,
8,
9,
11,
12,
14,
16,
18,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
32,
33,
35,
36,
37,
38,
39,
40,
42,
43,
45
] | 65.217391 | false | 26.470588 | 46 | 10 | 34.782609 | 1 |
def norm_from_scale(scale, norm):
    # This is an internal matplotlib function that simplifies access.
# It is likely to become part of the matplotlib API at some point:
# https://github.com/matplotlib/matplotlib/issues/20329
if isinstance(norm, mpl.colors.Normalize):
return norm
if scale is None:
return None
if norm is None:
vmin = vmax = None
else:
vmin, vmax = norm # TODO more helpful error if this fails?
class ScaledNorm(mpl.colors.Normalize):
def __call__(self, value, clip=None):
# From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py
# See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE
value, is_scalar = self.process_value(value)
self.autoscale_None(value)
if self.vmin > self.vmax:
raise ValueError("vmin must be less or equal to vmax")
if self.vmin == self.vmax:
return np.full_like(value, 0)
if clip is None:
clip = self.clip
if clip:
value = np.clip(value, self.vmin, self.vmax)
# ***** Seaborn changes start ****
t_value = self.transform(value).reshape(np.shape(value))
t_vmin, t_vmax = self.transform([self.vmin, self.vmax])
# ***** Seaborn changes end *****
if not np.isfinite([t_vmin, t_vmax]).all():
raise ValueError("Invalid vmin or vmax")
t_value -= t_vmin
t_value /= (t_vmax - t_vmin)
t_value = np.ma.masked_invalid(t_value, copy=False)
return t_value[0] if is_scalar else t_value
new_norm = ScaledNorm(vmin, vmax)
new_norm.transform = scale.get_transform().transform
return new_norm
| 18,915 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
scale_factory
|
(scale, axis, **kwargs)
|
return scale
|
Backwards compatibility for creation of independent scales.
Matplotlib scales require an Axis object for instantiation on < 3.4.
But the axis is not used, aside from extraction of the axis_name in LogScale.
|
Backwards compatibility for creation of independent scales.
| 70 | 105 |
def scale_factory(scale, axis, **kwargs):
"""
    Backwards compatibility for creation of independent scales.
Matplotlib scales require an Axis object for instantiation on < 3.4.
But the axis is not used, aside from extraction of the axis_name in LogScale.
"""
modify_transform = False
if _version_predates(mpl, "3.4"):
if axis[0] in "xy":
modify_transform = True
axis = axis[0]
base = kwargs.pop("base", None)
if base is not None:
kwargs[f"base{axis}"] = base
nonpos = kwargs.pop("nonpositive", None)
if nonpos is not None:
kwargs[f"nonpos{axis}"] = nonpos
if isinstance(scale, str):
class Axis:
axis_name = axis
axis = Axis()
scale = mpl.scale.scale_factory(scale, axis, **kwargs)
if modify_transform:
transform = scale.get_transform()
transform.base = kwargs.get("base", 10)
if kwargs.get("nonpositive") == "mask":
# Setting a private attribute, but we only get here
# on an old matplotlib, so this won't break going forwards
transform._clip = False
return scale
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L70-L105
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 22.222222 |
[
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
20,
21,
22,
23,
25,
27,
28,
29,
30,
33,
35
] | 61.111111 | false | 26.470588 | 36 | 8 | 38.888889 | 4 |
def scale_factory(scale, axis, **kwargs):
modify_transform = False
if _version_predates(mpl, "3.4"):
if axis[0] in "xy":
modify_transform = True
axis = axis[0]
base = kwargs.pop("base", None)
if base is not None:
kwargs[f"base{axis}"] = base
nonpos = kwargs.pop("nonpositive", None)
if nonpos is not None:
kwargs[f"nonpos{axis}"] = nonpos
if isinstance(scale, str):
class Axis:
axis_name = axis
axis = Axis()
scale = mpl.scale.scale_factory(scale, axis, **kwargs)
if modify_transform:
transform = scale.get_transform()
transform.base = kwargs.get("base", 10)
if kwargs.get("nonpositive") == "mask":
# Setting a private attribute, but we only get here
# on an old matplotlib, so this won't break going forwards
transform._clip = False
return scale
| 18,916 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
set_scale_obj
|
(ax, axis, scale)
|
Handle backwards compatibility with setting matplotlib scale.
|
Handle backwards compatibility with setting matplotlib scale.
| 108 | 127 |
def set_scale_obj(ax, axis, scale):
    """Handle backwards compatibility with setting matplotlib scale."""
if _version_predates(mpl, "3.4"):
# The ability to pass a BaseScale instance to Axes.set_{}scale was added
# to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089
# Workaround: use the scale name, which is restrictive only if the user
# wants to define a custom scale; they'll need to update the registry too.
if scale.name is None:
# Hack to support our custom Formatter-less CatScale
return
method = getattr(ax, f"set_{axis}scale")
kws = {}
if scale.name == "function":
trans = scale.get_transform()
kws["functions"] = (trans._forward, trans._inverse)
method(scale.name, **kws)
axis_obj = getattr(ax, f"{axis}axis")
scale.set_default_locators_and_formatters(axis_obj)
else:
ax.set(**{f"{axis}scale": scale})
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L108-L127
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
18,
19
] | 45 |
[
7,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 50 | false | 26.470588 | 20 | 4 | 50 | 1 |
def set_scale_obj(ax, axis, scale):
if _version_predates(mpl, "3.4"):
# The ability to pass a BaseScale instance to Axes.set_{}scale was added
# to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089
# Workaround: use the scale name, which is restrictive only if the user
# wants to define a custom scale; they'll need to update the registry too.
if scale.name is None:
# Hack to support our custom Formatter-less CatScale
return
method = getattr(ax, f"set_{axis}scale")
kws = {}
if scale.name == "function":
trans = scale.get_transform()
kws["functions"] = (trans._forward, trans._inverse)
method(scale.name, **kws)
axis_obj = getattr(ax, f"{axis}axis")
scale.set_default_locators_and_formatters(axis_obj)
else:
ax.set(**{f"{axis}scale": scale})
| 18,917 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
get_colormap
|
(name)
|
Handle changes to matplotlib colormap interface in 3.6.
|
Handle changes to matplotlib colormap interface in 3.6.
| 130 | 135 |
def get_colormap(name):
"""Handle changes to matplotlib colormap interface in 3.6."""
try:
return mpl.colormaps[name]
except AttributeError:
return mpl.cm.get_cmap(name)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L130-L135
| 26 |
[
0,
1,
2,
3,
4
] | 83.333333 |
[
5
] | 16.666667 | false | 26.470588 | 6 | 2 | 83.333333 | 1 |
def get_colormap(name):
try:
return mpl.colormaps[name]
except AttributeError:
return mpl.cm.get_cmap(name)
| 18,918 |
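The same fallback written inline, for reference; `mpl.colormaps` exists from matplotlib 3.6 onward, and `mpl.cm.get_cmap` covers older versions:

import matplotlib as mpl

try:
    cmap = mpl.colormaps["viridis"]
except AttributeError:  # matplotlib < 3.6
    cmap = mpl.cm.get_cmap("viridis")
print(cmap(0.5))  # RGBA tuple at the colormap midpoint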
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
register_colormap
|
(name, cmap)
|
Handle changes to matplotlib colormap interface in 3.6.
|
Handle changes to matplotlib colormap interface in 3.6.
| 138 | 144 |
def register_colormap(name, cmap):
"""Handle changes to matplotlib colormap interface in 3.6."""
try:
if name not in mpl.colormaps:
mpl.colormaps.register(cmap, name=name)
except AttributeError:
mpl.cm.register_cmap(name, cmap)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L138-L144
| 26 |
[
0,
1,
2,
3,
4
] | 71.428571 |
[
5,
6
] | 28.571429 | false | 26.470588 | 7 | 3 | 71.428571 | 1 |
def register_colormap(name, cmap):
try:
if name not in mpl.colormaps:
mpl.colormaps.register(cmap, name=name)
except AttributeError:
mpl.cm.register_cmap(name, cmap)
| 18,919 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
set_layout_engine
|
(fig, engine)
|
Handle changes to auto layout engine interface in 3.6.
|
Handle changes to auto layout engine interface in 3.6.
| 147 | 159 |
def set_layout_engine(fig, engine):
    """Handle changes to auto layout engine interface in 3.6."""
if hasattr(fig, "set_layout_engine"):
fig.set_layout_engine(engine)
else:
# _version_predates(mpl, 3.6)
if engine == "tight":
fig.set_tight_layout(True)
elif engine == "constrained":
fig.set_constrained_layout(True)
elif engine == "none":
fig.set_tight_layout(False)
fig.set_constrained_layout(False)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L147-L159
| 26 |
[
0,
1,
2,
3,
4,
5
] | 46.153846 |
[
6,
7,
8,
9,
10,
11,
12
] | 53.846154 | false | 26.470588 | 13 | 5 | 46.153846 | 1 |
def set_layout_engine(fig, engine):
if hasattr(fig, "set_layout_engine"):
fig.set_layout_engine(engine)
else:
# _version_predates(mpl, 3.6)
if engine == "tight":
fig.set_tight_layout(True)
elif engine == "constrained":
fig.set_constrained_layout(True)
elif engine == "none":
fig.set_tight_layout(False)
fig.set_constrained_layout(False)
| 18,920 |
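The same version split written directly against a figure; `set_layout_engine` exists from matplotlib 3.6, so older versions fall back to the boolean setters used above:

import matplotlib.pyplot as plt

fig = plt.figure()
if hasattr(fig, "set_layout_engine"):  # matplotlib >= 3.6
    fig.set_layout_engine("constrained")
else:
    fig.set_constrained_layout(True)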
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_compat.py
|
share_axis
|
(ax0, ax1, which)
|
Handle changes to post-hoc axis sharing.
|
Handle changes to post-hoc axis sharing.
| 162 | 168 |
def share_axis(ax0, ax1, which):
"""Handle changes to post-hoc axis sharing."""
if _version_predates(mpl, "3.5"):
group = getattr(ax0, f"get_shared_{which}_axes")()
group.join(ax1, ax0)
else:
getattr(ax1, f"share{which}")(ax0)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_compat.py#L162-L168
| 26 |
[
0,
1,
2,
5,
6
] | 71.428571 |
[
3,
4
] | 28.571429 | false | 26.470588 | 7 | 2 | 71.428571 | 1 |
def share_axis(ax0, ax1, which):
if _version_predates(mpl, "3.5"):
group = getattr(ax0, f"get_shared_{which}_axes")()
group.join(ax1, ax0)
else:
getattr(ax1, f"share{which}")(ax0)
| 18,921 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
_percentile_interval
|
(data, width)
|
return np.nanpercentile(data, percentiles)
|
Return a percentile interval from data of a given width.
|
Return a percentile interval from data of a given width.
| 519 | 523 |
def _percentile_interval(data, width):
"""Return a percentile interval from data of a given width."""
edge = (100 - width) / 2
percentiles = edge, 100 - edge
return np.nanpercentile(data, percentiles)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L519-L523
| 26 |
[
0,
1,
2,
3,
4
] | 100 |
[] | 0 | true | 96.212121 | 5 | 1 | 100 | 1 |
def _percentile_interval(data, width):
edge = (100 - width) / 2
percentiles = edge, 100 - edge
return np.nanpercentile(data, percentiles)
| 18,922 |
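A worked example of the arithmetic above: for width=95 the edge is (100 - 95)/2 = 2.5, so the interval spans the 2.5th and 97.5th percentiles, with NaNs ignored.

import numpy as np

data = np.array([1.0, 2.0, 3.0, np.nan, 100.0])
edge = (100 - 95) / 2                              # 2.5
lo, hi = np.nanpercentile(data, (edge, 100 - edge))
print(lo, hi)  # interval endpoints computed on the non-NaN values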
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
_validate_errorbar_arg
|
(arg)
|
return method, level
|
Check type and value of errorbar argument and assign default level.
|
Check type and value of errorbar argument and assign default level.
| 526 | 554 |
def _validate_errorbar_arg(arg):
"""Check type and value of errorbar argument and assign default level."""
DEFAULT_LEVELS = {
"ci": 95,
"pi": 95,
"se": 1,
"sd": 1,
}
usage = "`errorbar` must be a callable, string, or (string, number) tuple"
if arg is None:
return None, None
elif callable(arg):
return arg, None
elif isinstance(arg, str):
method = arg
level = DEFAULT_LEVELS.get(method, None)
else:
try:
method, level = arg
except (ValueError, TypeError) as err:
raise err.__class__(usage) from err
_check_argument("errorbar", list(DEFAULT_LEVELS), method)
if level is not None and not isinstance(level, Number):
raise TypeError(usage)
return method, level
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L526-L554
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28
] | 100 |
[] | 0 | true | 96.212121 | 29 | 7 | 100 | 1 |
def _validate_errorbar_arg(arg):
DEFAULT_LEVELS = {
"ci": 95,
"pi": 95,
"se": 1,
"sd": 1,
}
usage = "`errorbar` must be a callable, string, or (string, number) tuple"
if arg is None:
return None, None
elif callable(arg):
return arg, None
elif isinstance(arg, str):
method = arg
level = DEFAULT_LEVELS.get(method, None)
else:
try:
method, level = arg
except (ValueError, TypeError) as err:
raise err.__class__(usage) from err
_check_argument("errorbar", list(DEFAULT_LEVELS), method)
if level is not None and not isinstance(level, Number):
raise TypeError(usage)
return method, level
| 18,923 |
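A sketch of the argument forms the validator accepts, assuming the private helper is importable from `seaborn._statistics` as defined above:

from seaborn._statistics import _validate_errorbar_arg

print(_validate_errorbar_arg("ci"))        # ('ci', 95): default level filled in
print(_validate_errorbar_arg(("sd", 2)))   # ('sd', 2): explicit level
print(_validate_errorbar_arg(None))        # (None, None): no error bars
f = lambda v: (v.min(), v.max())
print(_validate_errorbar_arg(f))           # (f, None): callable passed through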
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE.__init__
|
(
self, *,
bw_method=None,
bw_adjust=1,
gridsize=200,
cut=3,
clip=None,
cumulative=False,
)
|
Initialize the estimator with its parameters.
Parameters
----------
bw_method : string, scalar, or callable, optional
Method for determining the smoothing bandwidth to use; passed to
:class:`scipy.stats.gaussian_kde`.
bw_adjust : number, optional
Factor that multiplicatively scales the value chosen using
``bw_method``. Increasing will make the curve smoother. See Notes.
gridsize : int, optional
Number of points on each dimension of the evaluation grid.
cut : number, optional
Factor, multiplied by the smoothing bandwidth, that determines how
far the evaluation grid extends past the extreme datapoints. When
set to 0, truncate the curve at the data limits.
clip : pair of numbers or None, or a pair of such pairs
Do not evaluate the density outside of these limits.
cumulative : bool, optional
If True, estimate a cumulative distribution function. Requires scipy.
|
Initialize the estimator with its parameters.
| 43 | 87 |
def __init__(
self, *,
bw_method=None,
bw_adjust=1,
gridsize=200,
cut=3,
clip=None,
cumulative=False,
):
"""Initialize the estimator with its parameters.
Parameters
----------
bw_method : string, scalar, or callable, optional
Method for determining the smoothing bandwidth to use; passed to
:class:`scipy.stats.gaussian_kde`.
bw_adjust : number, optional
Factor that multiplicatively scales the value chosen using
``bw_method``. Increasing will make the curve smoother. See Notes.
gridsize : int, optional
Number of points on each dimension of the evaluation grid.
cut : number, optional
Factor, multiplied by the smoothing bandwidth, that determines how
far the evaluation grid extends past the extreme datapoints. When
set to 0, truncate the curve at the data limits.
clip : pair of numbers or None, or a pair of such pairs
Do not evaluate the density outside of these limits.
cumulative : bool, optional
If True, estimate a cumulative distribution function. Requires scipy.
"""
if clip is None:
clip = None, None
self.bw_method = bw_method
self.bw_adjust = bw_adjust
self.gridsize = gridsize
self.cut = cut
self.clip = clip
self.cumulative = cumulative
if cumulative and _no_scipy:
raise RuntimeError("Cumulative KDE evaluation requires scipy")
self.support = None
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L43-L87
| 26 |
[
0,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44
] | 35.555556 |
[] | 0 | false | 96.212121 | 45 | 4 | 100 | 20 |
def __init__(
self, *,
bw_method=None,
bw_adjust=1,
gridsize=200,
cut=3,
clip=None,
cumulative=False,
):
if clip is None:
clip = None, None
self.bw_method = bw_method
self.bw_adjust = bw_adjust
self.gridsize = gridsize
self.cut = cut
self.clip = clip
self.cumulative = cumulative
if cumulative and _no_scipy:
raise RuntimeError("Cumulative KDE evaluation requires scipy")
self.support = None
| 18,924 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._define_support_grid
|
(self, x, bw, cut, clip, gridsize)
|
return np.linspace(gridmin, gridmax, gridsize)
|
Create the grid of evaluation points for vector x.
|
Create the grid of evaluation points for vector x.
| 89 | 95 |
def _define_support_grid(self, x, bw, cut, clip, gridsize):
    """Create the grid of evaluation points for vector x."""
clip_lo = -np.inf if clip[0] is None else clip[0]
clip_hi = +np.inf if clip[1] is None else clip[1]
gridmin = max(x.min() - bw * cut, clip_lo)
gridmax = min(x.max() + bw * cut, clip_hi)
return np.linspace(gridmin, gridmax, gridsize)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L89-L95
| 26 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 96.212121 | 7 | 1 | 100 | 1 |
def _define_support_grid(self, x, bw, cut, clip, gridsize):
clip_lo = -np.inf if clip[0] is None else clip[0]
clip_hi = +np.inf if clip[1] is None else clip[1]
gridmin = max(x.min() - bw * cut, clip_lo)
gridmax = min(x.max() + bw * cut, clip_hi)
return np.linspace(gridmin, gridmax, gridsize)
| 18,925 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._define_support_univariate
|
(self, x, weights)
|
return grid
|
Create a 1D grid of evaluation points.
|
Create a 1D grid of evaluation points.
| 97 | 104 |
def _define_support_univariate(self, x, weights):
"""Create a 1D grid of evaluation points."""
kde = self._fit(x, weights)
bw = np.sqrt(kde.covariance.squeeze())
grid = self._define_support_grid(
x, bw, self.cut, self.clip, self.gridsize
)
return grid
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L97-L104
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 100 |
[] | 0 | true | 96.212121 | 8 | 1 | 100 | 1 |
def _define_support_univariate(self, x, weights):
kde = self._fit(x, weights)
bw = np.sqrt(kde.covariance.squeeze())
grid = self._define_support_grid(
x, bw, self.cut, self.clip, self.gridsize
)
return grid
| 18,926 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._define_support_bivariate
|
(self, x1, x2, weights)
|
return grid1, grid2
|
Create a 2D grid of evaluation points.
|
Create a 2D grid of evaluation points.
| 106 | 122 |
def _define_support_bivariate(self, x1, x2, weights):
"""Create a 2D grid of evaluation points."""
clip = self.clip
if clip[0] is None or np.isscalar(clip[0]):
clip = (clip, clip)
kde = self._fit([x1, x2], weights)
bw = np.sqrt(np.diag(kde.covariance).squeeze())
grid1 = self._define_support_grid(
x1, bw[0], self.cut, clip[0], self.gridsize
)
grid2 = self._define_support_grid(
x2, bw[1], self.cut, clip[1], self.gridsize
)
return grid1, grid2
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L106-L122
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16
] | 100 |
[] | 0 | true | 96.212121 | 17 | 3 | 100 | 1 |
def _define_support_bivariate(self, x1, x2, weights):
clip = self.clip
if clip[0] is None or np.isscalar(clip[0]):
clip = (clip, clip)
kde = self._fit([x1, x2], weights)
bw = np.sqrt(np.diag(kde.covariance).squeeze())
grid1 = self._define_support_grid(
x1, bw[0], self.cut, clip[0], self.gridsize
)
grid2 = self._define_support_grid(
x2, bw[1], self.cut, clip[1], self.gridsize
)
return grid1, grid2
| 18,927 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE.define_support
|
(self, x1, x2=None, weights=None, cache=True)
|
return support
|
Create the evaluation grid for a given data set.
|
Create the evaluation grid for a given data set.
| 124 | 134 |
def define_support(self, x1, x2=None, weights=None, cache=True):
"""Create the evaluation grid for a given data set."""
if x2 is None:
support = self._define_support_univariate(x1, weights)
else:
support = self._define_support_bivariate(x1, x2, weights)
if cache:
self.support = support
return support
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L124-L134
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 100 |
[] | 0 | true | 96.212121 | 11 | 3 | 100 | 1 |
def define_support(self, x1, x2=None, weights=None, cache=True):
if x2 is None:
support = self._define_support_univariate(x1, weights)
else:
support = self._define_support_bivariate(x1, x2, weights)
if cache:
self.support = support
return support
| 18,928 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._fit
|
(self, fit_data, weights=None)
|
return kde
|
Fit the scipy kde while adding bw_adjust logic and version check.
|
Fit the scipy kde while adding bw_adjust logic and version check.
| 136 | 145 |
def _fit(self, fit_data, weights=None):
"""Fit the scipy kde while adding bw_adjust logic and version check."""
fit_kws = {"bw_method": self.bw_method}
if weights is not None:
fit_kws["weights"] = weights
kde = gaussian_kde(fit_data, **fit_kws)
kde.set_bandwidth(kde.factor * self.bw_adjust)
return kde
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L136-L145
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 96.212121 | 10 | 2 | 100 | 1 |
def _fit(self, fit_data, weights=None):
fit_kws = {"bw_method": self.bw_method}
if weights is not None:
fit_kws["weights"] = weights
kde = gaussian_kde(fit_data, **fit_kws)
kde.set_bandwidth(kde.factor * self.bw_adjust)
return kde
| 18,929 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._eval_univariate
|
(self, x, weights=None)
|
return density, support
|
Fit and evaluate a univariate KDE on univariate data.
|
Fit and evaluate a univariate KDE on univariate data.
| 147 | 163 |
def _eval_univariate(self, x, weights=None):
    """Fit and evaluate a univariate KDE on univariate data."""
support = self.support
if support is None:
support = self.define_support(x, cache=False)
kde = self._fit(x, weights)
if self.cumulative:
s_0 = support[0]
density = np.array([
kde.integrate_box_1d(s_0, s_i) for s_i in support
])
else:
density = kde(support)
return density, support
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L147-L163
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
13,
14,
15,
16
] | 76.470588 |
[
9,
10
] | 11.764706 | false | 96.212121 | 17 | 4 | 88.235294 | 1 |
def _eval_univariate(self, x, weights=None):
support = self.support
if support is None:
support = self.define_support(x, cache=False)
kde = self._fit(x, weights)
if self.cumulative:
s_0 = support[0]
density = np.array([
kde.integrate_box_1d(s_0, s_i) for s_i in support
])
else:
density = kde(support)
return density, support
| 18,930 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE._eval_bivariate
|
(self, x1, x2, weights=None)
|
return density, support
|
Fit and evaluate a bivariate KDE on bivariate data.
|
Fit and evaluate a bivariate KDE on bivariate data.
| 165 | 187 |
def _eval_bivariate(self, x1, x2, weights=None):
    """Fit and evaluate a bivariate KDE on bivariate data."""
support = self.support
if support is None:
support = self.define_support(x1, x2, cache=False)
kde = self._fit([x1, x2], weights)
if self.cumulative:
grid1, grid2 = support
density = np.zeros((grid1.size, grid2.size))
p0 = grid1.min(), grid2.min()
for i, xi in enumerate(grid1):
for j, xj in enumerate(grid2):
density[i, j] = kde.integrate_box(p0, (xi, xj))
else:
xx1, xx2 = np.meshgrid(*support)
density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)
return density, support
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L165-L187
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
18,
19,
20,
21,
22
] | 65.217391 |
[
10,
11,
12,
13,
14,
15
] | 26.086957 | false | 96.212121 | 23 | 5 | 73.913043 | 1 |
def _eval_bivariate(self, x1, x2, weights=None):
support = self.support
if support is None:
support = self.define_support(x1, x2, cache=False)
kde = self._fit([x1, x2], weights)
if self.cumulative:
grid1, grid2 = support
density = np.zeros((grid1.size, grid2.size))
p0 = grid1.min(), grid2.min()
for i, xi in enumerate(grid1):
for j, xj in enumerate(grid2):
density[i, j] = kde.integrate_box(p0, (xi, xj))
else:
xx1, xx2 = np.meshgrid(*support)
density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)
return density, support
| 18,931 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
KDE.__call__
|
(self, x1, x2=None, weights=None)
|
Fit and evaluate on univariate or bivariate data.
|
Fit and evaluate on univariate or bivariate data.
| 189 | 194 |
def __call__(self, x1, x2=None, weights=None):
"""Fit and evaluate on univariate or bivariate data."""
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L189-L194
| 26 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.212121 | 6 | 2 | 100 | 1 |
def __call__(self, x1, x2=None, weights=None):
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
| 18,932 |
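An end-to-end sketch of the estimator on univariate data, assuming the class is importable from `seaborn._statistics`; scipy is required for the underlying gaussian_kde fit.

import numpy as np
from seaborn._statistics import KDE

x = np.random.default_rng(0).normal(size=200)
density, support = KDE(bw_adjust=0.5)(x)
print(support.shape, density.shape)  # (200,) (200,): one value per grid point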
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram.__init__
|
(
self,
stat="count",
bins="auto",
binwidth=None,
binrange=None,
discrete=False,
cumulative=False,
)
|
Initialize the estimator with its parameters.
Parameters
----------
stat : str
Aggregate statistic to compute in each bin.
- `count`: show the number of observations in each bin
- `frequency`: show the number of observations divided by the bin width
- `probability` or `proportion`: normalize such that bar heights sum to 1
- `percent`: normalize such that bar heights sum to 100
- `density`: normalize such that the total area of the histogram equals 1
bins : str, number, vector, or a pair of such values
Generic bin parameter that can be the name of a reference rule,
the number of bins, or the breaks of the bins.
Passed to :func:`numpy.histogram_bin_edges`.
binwidth : number or pair of numbers
Width of each bin, overrides ``bins`` but can be used with
``binrange``.
binrange : pair of numbers or a pair of pairs
Lowest and highest value for bin edges; can be used either
with ``bins`` or ``binwidth``. Defaults to data extremes.
discrete : bool or pair of bools
If True, set ``binwidth`` and ``binrange`` such that bin
edges cover integer values in the dataset.
cumulative : bool
If True, return the cumulative statistic.
|
Initialize the estimator with its parameters.
| 201 | 252 |
def __init__(
self,
stat="count",
bins="auto",
binwidth=None,
binrange=None,
discrete=False,
cumulative=False,
):
"""Initialize the estimator with its parameters.
Parameters
----------
stat : str
Aggregate statistic to compute in each bin.
- `count`: show the number of observations in each bin
- `frequency`: show the number of observations divided by the bin width
- `probability` or `proportion`: normalize such that bar heights sum to 1
- `percent`: normalize such that bar heights sum to 100
- `density`: normalize such that the total area of the histogram equals 1
bins : str, number, vector, or a pair of such values
Generic bin parameter that can be the name of a reference rule,
the number of bins, or the breaks of the bins.
Passed to :func:`numpy.histogram_bin_edges`.
binwidth : number or pair of numbers
Width of each bin, overrides ``bins`` but can be used with
``binrange``.
binrange : pair of numbers or a pair of pairs
Lowest and highest value for bin edges; can be used either
with ``bins`` or ``binwidth``. Defaults to data extremes.
discrete : bool or pair of bools
If True, set ``binwidth`` and ``binrange`` such that bin
edges cover integer values in the dataset.
cumulative : bool
If True, return the cumulative statistic.
"""
stat_choices = [
"count", "frequency", "density", "probability", "proportion", "percent",
]
_check_argument("stat", stat_choices, stat)
self.stat = stat
self.bins = bins
self.binwidth = binwidth
self.binrange = binrange
self.discrete = discrete
self.cumulative = cumulative
self.bin_kws = None
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L201-L252
| 26 |
[
0,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51
] | 28.846154 |
[] | 0 | false | 96.212121 | 52 | 1 | 100 | 28 |
def __init__(
self,
stat="count",
bins="auto",
binwidth=None,
binrange=None,
discrete=False,
cumulative=False,
):
stat_choices = [
"count", "frequency", "density", "probability", "proportion", "percent",
]
_check_argument("stat", stat_choices, stat)
self.stat = stat
self.bins = bins
self.binwidth = binwidth
self.binrange = binrange
self.discrete = discrete
self.cumulative = cumulative
self.bin_kws = None
| 18,933 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram._define_bin_edges
|
(self, x, weights, bins, binwidth, binrange, discrete)
|
return bin_edges
|
Inner function that takes bin parameters as arguments.
|
Inner function that takes bin parameters as arguments.
| 254 | 273 |
def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):
"""Inner function that takes bin parameters as arguments."""
if binrange is None:
start, stop = x.min(), x.max()
else:
start, stop = binrange
if discrete:
bin_edges = np.arange(start - .5, stop + 1.5)
elif binwidth is not None:
step = binwidth
bin_edges = np.arange(start, stop + step, step)
# Handle roundoff error (maybe there is a less clumsy way?)
if bin_edges.max() < stop or len(bin_edges) < 2:
bin_edges = np.append(bin_edges, bin_edges.max() + step)
else:
bin_edges = np.histogram_bin_edges(
x, bins, binrange, weights,
)
return bin_edges
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L254-L273
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19
] | 100 |
[] | 0 | true | 96.212121 | 20 | 6 | 100 | 1 |
def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):
if binrange is None:
start, stop = x.min(), x.max()
else:
start, stop = binrange
if discrete:
bin_edges = np.arange(start - .5, stop + 1.5)
elif binwidth is not None:
step = binwidth
bin_edges = np.arange(start, stop + step, step)
# Handle roundoff error (maybe there is a less clumsy way?)
if bin_edges.max() < stop or len(bin_edges) < 2:
bin_edges = np.append(bin_edges, bin_edges.max() + step)
else:
bin_edges = np.histogram_bin_edges(
x, bins, binrange, weights,
)
return bin_edges
| 18,934 |
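A worked example of the discrete branch above: edges land on half-integers, so each integer value in the data owns exactly one bin.

import numpy as np

start, stop = 1, 4
edges = np.arange(start - .5, stop + 1.5)
print(edges)  # [0.5 1.5 2.5 3.5 4.5]: one bin per integer in [1, 4]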
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram.define_bin_params
|
(self, x1, x2=None, weights=None, cache=True)
|
return bin_kws
|
Given data, return numpy.histogram parameters to define bins.
|
Given data, return numpy.histogram parameters to define bins.
| 275 | 333 |
def define_bin_params(self, x1, x2=None, weights=None, cache=True):
"""Given data, return numpy.histogram parameters to define bins."""
if x2 is None:
bin_edges = self._define_bin_edges(
x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,
)
if isinstance(self.bins, (str, Number)):
n_bins = len(bin_edges) - 1
bin_range = bin_edges.min(), bin_edges.max()
bin_kws = dict(bins=n_bins, range=bin_range)
else:
bin_kws = dict(bins=bin_edges)
else:
bin_edges = []
for i, x in enumerate([x1, x2]):
            # Resolve whether bin parameters are shared
# or specific to each variable
bins = self.bins
if not bins or isinstance(bins, (str, Number)):
pass
elif isinstance(bins[i], str):
bins = bins[i]
elif len(bins) == 2:
bins = bins[i]
binwidth = self.binwidth
if binwidth is None:
pass
elif not isinstance(binwidth, Number):
binwidth = binwidth[i]
binrange = self.binrange
if binrange is None:
pass
elif not isinstance(binrange[0], Number):
binrange = binrange[i]
discrete = self.discrete
if not isinstance(discrete, bool):
discrete = discrete[i]
# Define the bins for this variable
bin_edges.append(self._define_bin_edges(
x, weights, bins, binwidth, binrange, discrete,
))
bin_kws = dict(bins=tuple(bin_edges))
if cache:
self.bin_kws = bin_kws
return bin_kws
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L275-L333
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58
] | 100 |
[] | 0 | true | 96.212121 | 59 | 14 | 100 | 1 |
def define_bin_params(self, x1, x2=None, weights=None, cache=True):
if x2 is None:
bin_edges = self._define_bin_edges(
x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,
)
if isinstance(self.bins, (str, Number)):
n_bins = len(bin_edges) - 1
bin_range = bin_edges.min(), bin_edges.max()
bin_kws = dict(bins=n_bins, range=bin_range)
else:
bin_kws = dict(bins=bin_edges)
else:
bin_edges = []
for i, x in enumerate([x1, x2]):
            # Resolve whether bin parameters are shared
# or specific to each variable
bins = self.bins
if not bins or isinstance(bins, (str, Number)):
pass
elif isinstance(bins[i], str):
bins = bins[i]
elif len(bins) == 2:
bins = bins[i]
binwidth = self.binwidth
if binwidth is None:
pass
elif not isinstance(binwidth, Number):
binwidth = binwidth[i]
binrange = self.binrange
if binrange is None:
pass
elif not isinstance(binrange[0], Number):
binrange = binrange[i]
discrete = self.discrete
if not isinstance(discrete, bool):
discrete = discrete[i]
# Define the bins for this variable
bin_edges.append(self._define_bin_edges(
x, weights, bins, binwidth, binrange, discrete,
))
bin_kws = dict(bins=tuple(bin_edges))
if cache:
self.bin_kws = bin_kws
return bin_kws
| 18,935 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram._eval_bivariate
|
(self, x1, x2, weights)
|
return hist, bin_edges
|
Inner function for histogram of two variables.
|
Inner function for histogram of two variables.
| 335 | 365 |
def _eval_bivariate(self, x1, x2, weights):
"""Inner function for histogram of two variables."""
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x1, x2, cache=False)
density = self.stat == "density"
hist, *bin_edges = np.histogram2d(
x1, x2, **bin_kws, weights=weights, density=density
)
area = np.outer(
np.diff(bin_edges[0]),
np.diff(bin_edges[1]),
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / area
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * area).cumsum(axis=0).cumsum(axis=1)
else:
hist = hist.cumsum(axis=0).cumsum(axis=1)
return hist, bin_edges
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L335-L365
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30
] | 100 |
[] | 0 | true | 96.212121 | 31 | 8 | 100 | 1 |
def _eval_bivariate(self, x1, x2, weights):
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x1, x2, cache=False)
density = self.stat == "density"
hist, *bin_edges = np.histogram2d(
x1, x2, **bin_kws, weights=weights, density=density
)
area = np.outer(
np.diff(bin_edges[0]),
np.diff(bin_edges[1]),
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / area
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * area).cumsum(axis=0).cumsum(axis=1)
else:
hist = hist.cumsum(axis=0).cumsum(axis=1)
return hist, bin_edges
| 18,936 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram._eval_univariate
|
(self, x, weights)
|
return hist, bin_edges
|
Inner function for histogram of one variable.
|
Inner function for histogram of one variable.
| 367 | 391 |
def _eval_univariate(self, x, weights):
"""Inner function for histogram of one variable."""
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x, weights=weights, cache=False)
density = self.stat == "density"
hist, bin_edges = np.histogram(
x, **bin_kws, weights=weights, density=density,
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / np.diff(bin_edges)
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * np.diff(bin_edges)).cumsum()
else:
hist = hist.cumsum()
return hist, bin_edges
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L367-L391
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24
] | 96 |
[
14
] | 4 | false | 96.212121 | 25 | 8 | 96 | 1 |
def _eval_univariate(self, x, weights):
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x, weights=weights, cache=False)
density = self.stat == "density"
hist, bin_edges = np.histogram(
x, **bin_kws, weights=weights, density=density,
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / np.diff(bin_edges)
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * np.diff(bin_edges)).cumsum()
else:
hist = hist.cumsum()
return hist, bin_edges
| 18,937 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
Histogram.__call__
|
(self, x1, x2=None, weights=None)
|
Count the occurrences in each bin, maybe normalize.
|
Count the occurrences in each bin, maybe normalize.
| 393 | 398 |
def __call__(self, x1, x2=None, weights=None):
"""Count the occurrences in each bin, maybe normalize."""
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L393-L398
| 26 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.212121 | 6 | 2 | 100 | 1 |
def __call__(self, x1, x2=None, weights=None):
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
| 18,938 |
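A short sketch of the estimator with a normalized stat, assuming the class is importable from `seaborn._statistics`:

import numpy as np
from seaborn._statistics import Histogram

x = np.random.default_rng(0).normal(size=500)
hist, edges = Histogram(stat="probability", bins=20)(x)
print(round(hist.sum(), 6))  # 1.0: bar heights sum to one for this stat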
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
ECDF.__init__
|
(self, stat="proportion", complementary=False)
|
Initialize the class with its parameters
Parameters
----------
stat : {{"proportion", "count"}}
Distribution statistic to compute.
complementary : bool
If True, use the complementary CDF (1 - CDF)
|
Initialize the class with its parameters
| 403 | 416 |
def __init__(self, stat="proportion", complementary=False):
"""Initialize the class with its parameters
Parameters
----------
stat : {{"proportion", "count"}}
Distribution statistic to compute.
complementary : bool
If True, use the complementary CDF (1 - CDF)
"""
_check_argument("stat", ["count", "proportion"], stat)
self.stat = stat
self.complementary = complementary
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L403-L416
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13
] | 100 |
[] | 0 | true | 96.212121 | 14 | 1 | 100 | 8 |
def __init__(self, stat="proportion", complementary=False):
_check_argument("stat", ["count", "proportion"], stat)
self.stat = stat
self.complementary = complementary
| 18,939 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
ECDF._eval_bivariate
|
(self, x1, x2, weights)
|
Inner function for ECDF of two variables.
|
Inner function for ECDF of two variables.
| 418 | 420 |
def _eval_bivariate(self, x1, x2, weights):
"""Inner function for ECDF of two variables."""
raise NotImplementedError("Bivariate ECDF is not implemented")
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L418-L420
| 26 |
[
0,
1,
2
] | 150 |
[] | 0 | true | 96.212121 | 3 | 1 | 100 | 1 |
def _eval_bivariate(self, x1, x2, weights):
raise NotImplementedError("Bivariate ECDF is not implemented")
| 18,940 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
ECDF._eval_univariate
|
(self, x, weights)
|
return y, x
|
Inner function for ECDF of one variable.
|
Inner function for ECDF of one variable.
| 422 | 438 |
def _eval_univariate(self, x, weights):
"""Inner function for ECDF of one variable."""
sorter = x.argsort()
x = x[sorter]
weights = weights[sorter]
y = weights.cumsum()
if self.stat == "proportion":
y = y / y.max()
x = np.r_[-np.inf, x]
y = np.r_[0, y]
if self.complementary:
y = y.max() - y
return y, x
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L422-L438
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16
] | 100 |
[] | 0 | true | 96.212121 | 17 | 3 | 100 | 1 |
def _eval_univariate(self, x, weights):
sorter = x.argsort()
x = x[sorter]
weights = weights[sorter]
y = weights.cumsum()
if self.stat == "proportion":
y = y / y.max()
x = np.r_[-np.inf, x]
y = np.r_[0, y]
if self.complementary:
y = y.max() - y
return y, x
| 18,941 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
ECDF.__call__
|
(self, x1, x2=None, weights=None)
|
Return proportion or count of observations below each sorted datapoint.
|
Return proportion or count of observations below each sorted datapoint.
| 440 | 451 |
def __call__(self, x1, x2=None, weights=None):
"""Return proportion or count of observations below each sorted datapoint."""
x1 = np.asarray(x1)
if weights is None:
weights = np.ones_like(x1)
else:
weights = np.asarray(weights)
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L440-L451
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 100 |
[] | 0 | true | 96.212121 | 12 | 3 | 100 | 1 |
def __call__(self, x1, x2=None, weights=None):
x1 = np.asarray(x1)
if weights is None:
weights = np.ones_like(x1)
else:
weights = np.asarray(weights)
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
| 18,942 |
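A tiny sketch of the estimator's output, assuming the class is importable from `seaborn._statistics`; note the leading (-inf, 0) point that starts the curve at zero.

import numpy as np
from seaborn._statistics import ECDF

y, x = ECDF()(np.array([3.0, 1.0, 2.0]))
print(x)  # [-inf  1.  2.  3.]
print(y)  # [0.  0.333...  0.667...  1.]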
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
EstimateAggregator.__init__
|
(self, estimator, errorbar=None, **boot_kws)
|
Data aggregator that produces an estimate and error bar interval.
Parameters
----------
estimator : callable or string
Function (or method name) that maps a vector to a scalar.
errorbar : string, (string, number) tuple, or callable
Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
with a method name and a level parameter, or a function that maps from a
vector to a (min, max) interval.
boot_kws
Additional keywords are passed to bootstrap when error_method is "ci".
|
Data aggregator that produces an estimate and error bar interval.
| 456 | 478 |
def __init__(self, estimator, errorbar=None, **boot_kws):
"""
Data aggregator that produces an estimate and error bar interval.
Parameters
----------
estimator : callable or string
Function (or method name) that maps a vector to a scalar.
errorbar : string, (string, number) tuple, or callable
Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
with a method name and a level parameter, or a function that maps from a
vector to a (min, max) interval.
boot_kws
Additional keywords are passed to bootstrap when error_method is "ci".
"""
self.estimator = estimator
method, level = _validate_errorbar_arg(errorbar)
self.error_method = method
self.error_level = level
self.boot_kws = boot_kws
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L456-L478
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22
] | 100 |
[] | 0 | true | 96.212121 | 23 | 1 | 100 | 12 |
def __init__(self, estimator, errorbar=None, **boot_kws):
self.estimator = estimator
method, level = _validate_errorbar_arg(errorbar)
self.error_method = method
self.error_level = level
self.boot_kws = boot_kws
| 18,943 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/_statistics.py
|
EstimateAggregator.__call__
|
(self, data, var)
|
return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
|
Aggregate over `var` column of `data` with estimate and error interval.
|
Aggregate over `var` column of `data` with estimate and error interval.
| 480 | 516 |
def __call__(self, data, var):
"""Aggregate over `var` column of `data` with estimate and error interval."""
vals = data[var]
if callable(self.estimator):
# You would think we could pass to vals.agg, and yet:
# https://github.com/mwaskom/seaborn/issues/2943
estimate = self.estimator(vals)
else:
estimate = vals.agg(self.estimator)
# Options that produce no error bars
if self.error_method is None:
err_min = err_max = np.nan
elif len(data) <= 1:
err_min = err_max = np.nan
# Generic errorbars from user-supplied function
elif callable(self.error_method):
err_min, err_max = self.error_method(vals)
# Parametric options
elif self.error_method == "sd":
half_interval = vals.std() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
elif self.error_method == "se":
half_interval = vals.sem() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
# Nonparametric options
elif self.error_method == "pi":
err_min, err_max = _percentile_interval(vals, self.error_level)
elif self.error_method == "ci":
units = data.get("units", None)
boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)
err_min, err_max = _percentile_interval(boots, self.error_level)
return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/_statistics.py#L480-L516
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36
] | 100 |
[] | 0 | true | 96.212121 | 37 | 9 | 100 | 1 |
def __call__(self, data, var):
vals = data[var]
if callable(self.estimator):
# You would think we could pass to vals.agg, and yet:
# https://github.com/mwaskom/seaborn/issues/2943
estimate = self.estimator(vals)
else:
estimate = vals.agg(self.estimator)
# Options that produce no error bars
if self.error_method is None:
err_min = err_max = np.nan
elif len(data) <= 1:
err_min = err_max = np.nan
# Generic errorbars from user-supplied function
elif callable(self.error_method):
err_min, err_max = self.error_method(vals)
# Parametric options
elif self.error_method == "sd":
half_interval = vals.std() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
elif self.error_method == "se":
half_interval = vals.sem() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
# Nonparametric options
elif self.error_method == "pi":
err_min, err_max = _percentile_interval(vals, self.error_level)
elif self.error_method == "ci":
units = data.get("units", None)
boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)
err_min, err_max = _percentile_interval(boots, self.error_level)
return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
| 18,944 |
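A sketch of the aggregator producing a mean with a plus/minus one sd interval, assuming the class is importable from `seaborn._statistics`:

import pandas as pd
from seaborn._statistics import EstimateAggregator

data = pd.DataFrame({"y": [1.0, 2.0, 3.0, 4.0]})
agg = EstimateAggregator("mean", errorbar="sd")
print(agg(data, "y"))  # Series with y (estimate), ymin, ymax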
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
lineplot
|
(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
orient="x", sort=True, err_style="band", err_kws=None,
legend="auto", ci="deprecated", ax=None, **kwargs
)
|
return ax
| 603 | 646 |
def lineplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
orient="x", sort=True, err_style="band", err_kws=None,
legend="auto", ci="deprecated", ax=None, **kwargs
):
# Handle deprecation of ci parameter
errorbar = _deprecate_ci(errorbar, ci)
variables = _LinePlotter.get_semantics(locals())
p = _LinePlotter(
data=data, variables=variables,
estimator=estimator, n_boot=n_boot, seed=seed, errorbar=errorbar,
sort=sort, orient=orient, err_style=err_style, err_kws=err_kws,
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if style is None and not {"ls", "linestyle"} & set(kwargs): # XXX
kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L603-L646
| 26 |
[
0,
11,
12,
13,
14,
15,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43
] | 65.909091 |
[] | 0 | false | 99.698795 | 44 | 6 | 100 | 0 |
def lineplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
orient="x", sort=True, err_style="band", err_kws=None,
legend="auto", ci="deprecated", ax=None, **kwargs
):
# Handle deprecation of ci parameter
errorbar = _deprecate_ci(errorbar, ci)
variables = _LinePlotter.get_semantics(locals())
p = _LinePlotter(
data=data, variables=variables,
estimator=estimator, n_boot=n_boot, seed=seed, errorbar=errorbar,
sort=sort, orient=orient, err_style=err_style, err_kws=err_kws,
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if style is None and not {"ls", "linestyle"} & set(kwargs): # XXX
kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
| 18,945 |
||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
scatterplot
|
(
data=None, *,
x=None, y=None, hue=None, size=None, style=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None, legend="auto", ax=None,
**kwargs
)
|
return ax
| 732 | 763 |
def scatterplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None, legend="auto", ax=None,
**kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(data=data, variables=variables, legend=legend)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", None)
kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L732-L763
| 26 |
[
0,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31
] | 78.125 |
[] | 0 | false | 99.698795 | 32 | 3 | 100 | 0 |
def scatterplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None, legend="auto", ax=None,
**kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(data=data, variables=variables, legend=legend)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", None)
kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
| 18,946 |
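A minimal usage sketch tying the axes-level functions above together; `load_dataset` fetches seaborn's bundled tips data, which needs network access on first call.

import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")
sns.scatterplot(data=tips, x="total_bill", y="tip", hue="time")
plt.show()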
||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
relplot
|
(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
row=None, col=None, col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
**kwargs
)
|
return g
| 825 | 991 |
def relplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
row=None, col=None, col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
**kwargs
):
if kind == "scatter":
plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = f"Plot kind {kind} not recognized"
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the `ax` parameter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
p = plotter(
data=data,
variables=plotter.get_semantics(locals()),
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
plot_semantics = p.semantics
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Add the grid semantics onto the plotter
grid_semantics = "row", "col"
p.semantics = plot_semantics + grid_semantics
p.assign_variables(
data=data,
variables=dict(
x=x, y=y,
hue=hue, size=size, style=style, units=units,
row=row, col=col,
),
)
# Define the named variables for plotting on each facet
# Rename the variables with a leading underscore to avoid
# collisions with faceting variable names
plot_variables = {v: f"_{v}" for v in variables}
plot_kws.update(plot_variables)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
for var in ["row", "col"]:
# Handle faceting variables that lack name information
if var in p.variables and p.variables[var] is None:
p.variables[var] = f"_{var}_"
grid_kws = {v: p.variables.get(v) for v in grid_semantics}
# Rename the columns of the plot_data structure appropriately
new_cols = plot_variables.copy()
new_cols.update(grid_kws)
full_data = p.plot_data.rename(columns=new_cols)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
g = FacetGrid(
data=full_data.dropna(axis=1, how="all"),
**grid_kws,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes, using the original variables
# Pass "" when the variable name is None to overwrite internal variables
g.set_axis_labels(variables.get("x") or "", variables.get("y") or "")
# Show the legend
if legend:
# Replace the original plot data so the legend uses
# numeric data with the correct type
p.plot_data = plot_data
p.add_legend_data(g.axes.flat[0])
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order,
title=p.legend_title,
adjust_subtitles=True)
# Rename the columns of the FacetGrid's `data` attribute
# to match the original column names
orig_cols = {
f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
}
grid_data = g.data.rename(columns=orig_cols)
if data is not None and (x is not None or y is not None):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
g.data = pd.merge(
data,
grid_data[grid_data.columns.difference(data.columns)],
left_index=True,
right_index=True,
)
else:
g.data = grid_data
return g
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L825-L991
| 26 |
[
0,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
24,
25,
26,
27,
28,
29,
33,
34,
35,
36,
37,
42,
43,
44,
45,
46,
47,
48,
49,
50,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
64,
65,
66,
68,
70,
71,
72,
73,
74,
75,
76,
77,
78,
84,
85,
86,
87,
88,
89,
90,
91,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
149,
150,
153,
154,
155,
156,
157,
164,
165,
166
] | 66.467066 |
[] | 0 | false | 99.698795 | 167 | 21 | 100 | 0 |
def relplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None,
row=None, col=None, col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
**kwargs
):
if kind == "scatter":
plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = f"Plot kind {kind} not recognized"
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the `ax` parameter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
p = plotter(
data=data,
variables=plotter.get_semantics(locals()),
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
plot_semantics = p.semantics
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Add the grid semantics onto the plotter
grid_semantics = "row", "col"
p.semantics = plot_semantics + grid_semantics
p.assign_variables(
data=data,
variables=dict(
x=x, y=y,
hue=hue, size=size, style=style, units=units,
row=row, col=col,
),
)
# Define the named variables for plotting on each facet
# Rename the variables with a leading underscore to avoid
# collisions with faceting variable names
plot_variables = {v: f"_{v}" for v in variables}
plot_kws.update(plot_variables)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
for var in ["row", "col"]:
# Handle faceting variables that lack name information
if var in p.variables and p.variables[var] is None:
p.variables[var] = f"_{var}_"
grid_kws = {v: p.variables.get(v) for v in grid_semantics}
# Rename the columns of the plot_data structure appropriately
new_cols = plot_variables.copy()
new_cols.update(grid_kws)
full_data = p.plot_data.rename(columns=new_cols)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
g = FacetGrid(
data=full_data.dropna(axis=1, how="all"),
**grid_kws,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes, using the original variables
# Pass "" when the variable name is None to overwrite internal variables
g.set_axis_labels(variables.get("x") or "", variables.get("y") or "")
# Show the legend
if legend:
# Replace the original plot data so the legend uses
# numeric data with the correct type
p.plot_data = plot_data
p.add_legend_data(g.axes.flat[0])
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order,
title=p.legend_title,
adjust_subtitles=True)
# Rename the columns of the FacetGrid's `data` attribute
# to match the original column names
orig_cols = {
f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
}
grid_data = g.data.rename(columns=orig_cols)
if data is not None and (x is not None or y is not None):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
g.data = pd.merge(
data,
grid_data[grid_data.columns.difference(data.columns)],
left_index=True,
right_index=True,
)
else:
g.data = grid_data
return g
| 18,947 |
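A sketch of the figure-level `relplot` path above: semantics are mapped on the full dataset, then `FacetGrid.map_dataframe` dispatches to `lineplot` or `scatterplot` per facet (inline data with invented names):

import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [1, 2, 3, 4] * 2,
                   "y": [2, 1, 4, 3, 3, 2, 5, 4],
                   "group": ["a"] * 4 + ["b"] * 4})
g = sns.relplot(data=df, x="x", y="y", hue="group", col="group", kind="line")
g.set_axis_labels("x value", "y value")  # labels derive from original variables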
||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
_RelationalPlotter.add_legend_data
|
(self, ax)
|
Add labeled artists to represent the different plot semantics.
|
Add labeled artists to represent the different plot semantics.
| 193 | 343 |
def add_legend_data(self, ax):
"""Add labeled artists to represent the different plot semantics."""
verbosity = self.legend
if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
raise ValueError(err)
elif verbosity is True:
verbosity = "auto"
legend_kwargs = {}
keys = []
# Assign a legend title if there is only going to be one sub-legend,
# otherwise, subtitles will be inserted into the texts list with an
# invisible handle (which is a hack)
titles = {
title for title in
(self.variables.get(v, None) for v in ["hue", "size", "style"])
if title is not None
}
if len(titles) == 1:
legend_title = titles.pop()
else:
legend_title = ""
title_kws = dict(
visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
)
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kwargs:
legend_kwargs[key].update(**kws)
else:
keys.append(key)
legend_kwargs[key] = dict(**kws)
# Define the maximum number of ticks to use for "brief" legends
brief_ticks = 6
# -- Add a legend for hue semantics
brief_hue = self._hue_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._hue_map.levels) > brief_ticks)
)
if brief_hue:
if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
limits = min(self._hue_map.levels), max(self._hue_map.levels)
hue_levels, hue_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["hue"].infer_objects().dtype
)
elif self._hue_map.levels is None:
hue_levels = hue_formatted_levels = []
else:
hue_levels = hue_formatted_levels = self._hue_map.levels
# Add the hue semantic subtitle
if not legend_title and self.variables.get("hue", None) is not None:
update((self.variables["hue"], "title"),
self.variables["hue"], **title_kws)
# Add the hue semantic labels
for level, formatted_level in zip(hue_levels, hue_formatted_levels):
if level is not None:
color = self._hue_map(level)
update(self.variables["hue"], formatted_level, color=color)
# -- Add a legend for size semantics
brief_size = self._size_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._size_map.levels) > brief_ticks)
)
if brief_size:
# Define how ticks will interpolate between the min/max data values
if isinstance(self._size_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
# Define the min/max data values
limits = min(self._size_map.levels), max(self._size_map.levels)
size_levels, size_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["size"].infer_objects().dtype
)
elif self._size_map.levels is None:
size_levels = size_formatted_levels = []
else:
size_levels = size_formatted_levels = self._size_map.levels
# Add the size semantic subtitle
if not legend_title and self.variables.get("size", None) is not None:
update((self.variables["size"], "title"),
self.variables["size"], **title_kws)
# Add the size semantic labels
for level, formatted_level in zip(size_levels, size_formatted_levels):
if level is not None:
size = self._size_map(level)
update(
self.variables["size"],
formatted_level,
linewidth=size,
s=size,
)
# -- Add a legend for style semantics
# Add the style semantic title
if not legend_title and self.variables.get("style", None) is not None:
update((self.variables["style"], "title"),
self.variables["style"], **title_kws)
# Add the style semantic labels
if self._style_map.levels is not None:
for level in self._style_map.levels:
if level is not None:
attrs = self._style_map(level)
update(
self.variables["style"],
level,
marker=attrs.get("marker", ""),
dashes=attrs.get("dashes", ""),
)
func = getattr(ax, self._legend_func)
legend_data = {}
legend_order = []
for key in keys:
_, label = key
kws = legend_kwargs[key]
kws.setdefault("color", ".2")
use_kws = {}
for attr in self._legend_attributes + ["visible"]:
if attr in kws:
use_kws[attr] = kws[attr]
artist = func([], [], label=label, **use_kws)
if self._legend_func == "plot":
artist = artist[0]
legend_data[key] = artist
legend_order.append(key)
self.legend_title = legend_title
self.legend_data = legend_data
self.legend_order = legend_order
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L193-L343
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150
] | 100 |
[] | 0 | true | 99.698795 | 151 | 36 | 100 | 1 |
def add_legend_data(self, ax):
verbosity = self.legend
if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
raise ValueError(err)
elif verbosity is True:
verbosity = "auto"
legend_kwargs = {}
keys = []
# Assign a legend title if there is only going to be one sub-legend,
# otherwise, subtitles will be inserted into the texts list with an
# invisible handle (which is a hack)
titles = {
title for title in
(self.variables.get(v, None) for v in ["hue", "size", "style"])
if title is not None
}
if len(titles) == 1:
legend_title = titles.pop()
else:
legend_title = ""
title_kws = dict(
visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
)
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kwargs:
legend_kwargs[key].update(**kws)
else:
keys.append(key)
legend_kwargs[key] = dict(**kws)
# Define the maximum number of ticks to use for "brief" legends
brief_ticks = 6
# -- Add a legend for hue semantics
brief_hue = self._hue_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._hue_map.levels) > brief_ticks)
)
if brief_hue:
if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
limits = min(self._hue_map.levels), max(self._hue_map.levels)
hue_levels, hue_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["hue"].infer_objects().dtype
)
elif self._hue_map.levels is None:
hue_levels = hue_formatted_levels = []
else:
hue_levels = hue_formatted_levels = self._hue_map.levels
# Add the hue semantic subtitle
if not legend_title and self.variables.get("hue", None) is not None:
update((self.variables["hue"], "title"),
self.variables["hue"], **title_kws)
# Add the hue semantic labels
for level, formatted_level in zip(hue_levels, hue_formatted_levels):
if level is not None:
color = self._hue_map(level)
update(self.variables["hue"], formatted_level, color=color)
# -- Add a legend for size semantics
brief_size = self._size_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._size_map.levels) > brief_ticks)
)
if brief_size:
# Define how ticks will interpolate between the min/max data values
if isinstance(self._size_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
# Define the min/max data values
limits = min(self._size_map.levels), max(self._size_map.levels)
size_levels, size_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["size"].infer_objects().dtype
)
elif self._size_map.levels is None:
size_levels = size_formatted_levels = []
else:
size_levels = size_formatted_levels = self._size_map.levels
# Add the size semantic subtitle
if not legend_title and self.variables.get("size", None) is not None:
update((self.variables["size"], "title"),
self.variables["size"], **title_kws)
# Add the size semantic labels
for level, formatted_level in zip(size_levels, size_formatted_levels):
if level is not None:
size = self._size_map(level)
update(
self.variables["size"],
formatted_level,
linewidth=size,
s=size,
)
# -- Add a legend for style semantics
# Add the style semantic title
if not legend_title and self.variables.get("style", None) is not None:
update((self.variables["style"], "title"),
self.variables["style"], **title_kws)
# Add the style semantic labels
if self._style_map.levels is not None:
for level in self._style_map.levels:
if level is not None:
attrs = self._style_map(level)
update(
self.variables["style"],
level,
marker=attrs.get("marker", ""),
dashes=attrs.get("dashes", ""),
)
func = getattr(ax, self._legend_func)
legend_data = {}
legend_order = []
for key in keys:
_, label = key
kws = legend_kwargs[key]
kws.setdefault("color", ".2")
use_kws = {}
for attr in self._legend_attributes + ["visible"]:
if attr in kws:
use_kws[attr] = kws[attr]
artist = func([], [], label=label, **use_kws)
if self._legend_func == "plot":
artist = artist[0]
legend_data[key] = artist
legend_order.append(key)
self.legend_title = legend_title
self.legend_data = legend_data
self.legend_order = legend_order
| 18,948 |
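`add_legend_data` is internal, but the "brief" numeric branch above can be reached through the public API: with more numeric hue levels than `brief_ticks` (6) and `legend="auto"`, legend entries fall back to locator-chosen ticks. A sketch with invented data:

import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame({"x": rng.random(50), "y": rng.random(50),
                   "value": rng.random(50) * 100})
# 50 distinct numeric hue levels exceed brief_ticks, so "auto" verbosity
# produces a brief legend with ~6 representative entries.
ax = sns.scatterplot(data=df, x="x", y="y", hue="value", legend="auto")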
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
_LinePlotter.__init__
|
(
self, *,
data=None, variables={},
estimator=None, n_boot=None, seed=None, errorbar=None,
sort=True, orient="x", err_style=None, err_kws=None, legend=None
)
| 351 | 376 |
def __init__(
self, *,
data=None, variables={},
estimator=None, n_boot=None, seed=None, errorbar=None,
sort=True, orient="x", err_style=None, err_kws=None, legend=None
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
super().__init__(data=data, variables=variables)
self.estimator = estimator
self.errorbar = errorbar
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.orient = orient
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L351-L376
| 26 |
[
0,
9,
10,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
] | 61.538462 |
[] | 0 | false | 99.698795 | 26 | 1 | 100 | 0 |
def __init__(
self, *,
data=None, variables={},
estimator=None, n_boot=None, seed=None, errorbar=None,
sort=True, orient="x", err_style=None, err_kws=None, legend=None
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
super().__init__(data=data, variables=variables)
self.estimator = estimator
self.errorbar = errorbar
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.orient = orient
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
| 18,949 |
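The constructor arguments above are forwarded from the public `lineplot` signature; a sketch showing how they surface there (all values arbitrary, data invented):

import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [0, 0, 1, 1, 2, 2],
                   "y": [1.0, 1.4, 2.0, 2.6, 3.0, 3.4]})
# estimator/errorbar/n_boot/seed/err_style land on the attributes set in __init__
ax = sns.lineplot(data=df, x="x", y="y", estimator="mean",
                  errorbar=("ci", 95), n_boot=200, seed=0, err_style="bars")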
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
_LinePlotter.plot
|
(self, ax, kws)
|
Draw the plot onto an axes, passing matplotlib kwargs.
|
Draw the plot onto an axes, passing matplotlib kwargs.
| 378 | 521 |
def plot(self, ax, kws):
"""Draw the plot onto an axes, passing matplotlib kwargs."""
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if `hue` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
kws.setdefault("markeredgewidth", kws.pop("mew", .75))
kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
# Set default error kwargs
err_kws = self.err_kws.copy()
if self.err_style == "band":
err_kws.setdefault("alpha", .2)
elif self.err_style == "bars":
pass
elif self.err_style is not None:
err = "`err_style` must be 'band' or 'bars', not {}"
raise ValueError(err.format(self.err_style))
# Initialize the aggregation object
agg = EstimateAggregator(
self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
)
# TODO abstract variable to aggregate over here-ish. Better name?
orient = self.orient
if orient not in {"x", "y"}:
err = f"`orient` must be either 'x' or 'y', not {orient!r}."
raise ValueError(err)
other = {"x": "y", "y": "x"}[orient]
# TODO How to handle NA? We don't want NA to propagate through to the
# estimate/CI when some values are present, but we would also like
# matplotlib to show "gaps" in the line when all values are missing.
# This is straightforward absent aggregation, but complicated with it.
# If we want to use nas, we need to conditionalize dropna in iter_data.
# Loop over the semantic subsets and add to the plot
grouping_vars = "hue", "size", "style"
for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
if self.sort:
sort_vars = ["units", orient, other]
sort_cols = [var for var in sort_vars if var in self.variables]
sub_data = sub_data.sort_values(sort_cols)
if (
self.estimator is not None
and sub_data[orient].value_counts().max() > 1
):
if "units" in self.variables:
# TODO eventually relax this constraint
err = "estimator must be None when specifying units"
raise ValueError(err)
grouped = sub_data.groupby(orient, sort=self.sort)
# Could pass as_index=False instead of reset_index,
# but that fails on a corner case with older pandas.
sub_data = grouped.apply(agg, other).reset_index()
else:
sub_data[f"{other}min"] = np.nan
sub_data[f"{other}max"] = np.nan
# TODO this is pretty ad hoc ; see GH2409
for var in "xy":
if self._log_scaled(var):
for col in sub_data.filter(regex=f"^{var}"):
sub_data[col] = np.power(10, sub_data[col])
# --- Draw the main line(s)
if "units" in self.variables: # XXX why not add to grouping variables?
lines = []
for _, unit_data in sub_data.groupby("units"):
lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
else:
lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
for line in lines:
if "hue" in sub_vars:
line.set_color(self._hue_map(sub_vars["hue"]))
if "size" in sub_vars:
line.set_linewidth(self._size_map(sub_vars["size"]))
if "style" in sub_vars:
attributes = self._style_map(sub_vars["style"])
if "dashes" in attributes:
line.set_dashes(attributes["dashes"])
if "marker" in attributes:
line.set_marker(attributes["marker"])
line_color = line.get_color()
line_alpha = line.get_alpha()
line_capstyle = line.get_solid_capstyle()
# --- Draw the confidence intervals
if self.estimator is not None and self.errorbar is not None:
# TODO handling of orientation will need to happen here
if self.err_style == "band":
func = {"x": ax.fill_between, "y": ax.fill_betweenx}[orient]
func(
sub_data[orient],
sub_data[f"{other}min"], sub_data[f"{other}max"],
color=line_color, **err_kws
)
elif self.err_style == "bars":
error_param = {
f"{other}err": (
sub_data[other] - sub_data[f"{other}min"],
sub_data[f"{other}max"] - sub_data[other],
)
}
ebars = ax.errorbar(
sub_data["x"], sub_data["y"], **error_param,
linestyle="", color=line_color, alpha=line_alpha,
**err_kws
)
# Set the capstyle properly on the error bars
for obj in ebars.get_children():
if isinstance(obj, mpl.collections.LineCollection):
obj.set_capstyle(line_capstyle)
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L378-L521
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143
] | 100 |
[] | 0 | true | 99.698795 | 144 | 30 | 100 | 1 |
def plot(self, ax, kws):
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if `hue` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
kws.setdefault("markeredgewidth", kws.pop("mew", .75))
kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
# Set default error kwargs
err_kws = self.err_kws.copy()
if self.err_style == "band":
err_kws.setdefault("alpha", .2)
elif self.err_style == "bars":
pass
elif self.err_style is not None:
err = "`err_style` must be 'band' or 'bars', not {}"
raise ValueError(err.format(self.err_style))
# Initialize the aggregation object
agg = EstimateAggregator(
self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
)
# TODO abstract variable to aggregate over here-ish. Better name?
orient = self.orient
if orient not in {"x", "y"}:
err = f"`orient` must be either 'x' or 'y', not {orient!r}."
raise ValueError(err)
other = {"x": "y", "y": "x"}[orient]
# TODO How to handle NA? We don't want NA to propagate through to the
# estimate/CI when some values are present, but we would also like
# matplotlib to show "gaps" in the line when all values are missing.
# This is straightforward absent aggregation, but complicated with it.
# If we want to use nas, we need to conditionalize dropna in iter_data.
# Loop over the semantic subsets and add to the plot
grouping_vars = "hue", "size", "style"
for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
if self.sort:
sort_vars = ["units", orient, other]
sort_cols = [var for var in sort_vars if var in self.variables]
sub_data = sub_data.sort_values(sort_cols)
if (
self.estimator is not None
and sub_data[orient].value_counts().max() > 1
):
if "units" in self.variables:
# TODO eventually relax this constraint
err = "estimator must be None when specifying units"
raise ValueError(err)
grouped = sub_data.groupby(orient, sort=self.sort)
# Could pass as_index=False instead of reset_index,
# but that fails on a corner case with older pandas.
sub_data = grouped.apply(agg, other).reset_index()
else:
sub_data[f"{other}min"] = np.nan
sub_data[f"{other}max"] = np.nan
# TODO this is pretty ad hoc ; see GH2409
for var in "xy":
if self._log_scaled(var):
for col in sub_data.filter(regex=f"^{var}"):
sub_data[col] = np.power(10, sub_data[col])
# --- Draw the main line(s)
if "units" in self.variables: # XXX why not add to grouping variables?
lines = []
for _, unit_data in sub_data.groupby("units"):
lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
else:
lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
for line in lines:
if "hue" in sub_vars:
line.set_color(self._hue_map(sub_vars["hue"]))
if "size" in sub_vars:
line.set_linewidth(self._size_map(sub_vars["size"]))
if "style" in sub_vars:
attributes = self._style_map(sub_vars["style"])
if "dashes" in attributes:
line.set_dashes(attributes["dashes"])
if "marker" in attributes:
line.set_marker(attributes["marker"])
line_color = line.get_color()
line_alpha = line.get_alpha()
line_capstyle = line.get_solid_capstyle()
# --- Draw the confidence intervals
if self.estimator is not None and self.errorbar is not None:
# TODO handling of orientation will need to happen here
if self.err_style == "band":
func = {"x": ax.fill_between, "y": ax.fill_betweenx}[orient]
func(
sub_data[orient],
sub_data[f"{other}min"], sub_data[f"{other}max"],
color=line_color, **err_kws
)
elif self.err_style == "bars":
error_param = {
f"{other}err": (
sub_data[other] - sub_data[f"{other}min"],
sub_data[f"{other}max"] - sub_data[other],
)
}
ebars = ax.errorbar(
sub_data["x"], sub_data["y"], **error_param,
linestyle="", color=line_color, alpha=line_alpha,
**err_kws
)
# Set the capstyle properly on the error bars
for obj in ebars.get_children():
if isinstance(obj, mpl.collections.LineCollection):
obj.set_capstyle(line_capstyle)
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
| 18,950 |
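The `units` branch in `plot` above draws one unaggregated line per unit and, per the explicit check in the body, requires `estimator=None`. A sketch through the public API (invented data):

import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [0, 1, 2, 3] * 2,
                   "y": [1, 2, 3, 4, 2, 3, 4, 5],
                   "subject": ["a"] * 4 + ["b"] * 4})
# One line per subject; the estimator must be disabled when units is given
ax = sns.lineplot(data=df, x="x", y="y", units="subject", estimator=None)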
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
_ScatterPlotter.__init__
|
(self, *, data=None, variables={}, legend=None)
| 529 | 540 |
def __init__(self, *, data=None, variables={}, legend=None):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.legend = legend
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L529-L540
| 26 |
[
0,
1,
2,
3,
4,
5,
8,
9,
10,
11
] | 83.333333 |
[] | 0 | false | 99.698795 | 12 | 1 | 100 | 0 |
def __init__(self, *, data=None, variables={}, legend=None):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.legend = legend
| 18,951 |
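The default size range set in `__init__` above is in point-area units (scatter sizes are areas, hence the square of the marker size). Evaluating the expression directly:

import matplotlib as mpl
import numpy as np

# With matplotlib's default lines.markersize of 6.0 this prints [18. 72.]
default_range = np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
print(default_range)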
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/relational.py
|
_ScatterPlotter.plot
|
(self, ax, kws)
| 542 | 600 |
def plot(self, ax, kws):
# --- Determine the visual attributes of the plot
data = self.plot_data.dropna()
if data.empty:
return
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# Conditionally set the marker edgecolor based on whether the marker is "filled"
# See https://github.com/matplotlib/matplotlib/issues/17849 for context
m = kws.get("marker", mpl.rcParams.get("marker", "o"))
if not isinstance(m, mpl.markers.MarkerStyle):
# TODO in more recent matplotlib (which?) can pass a MarkerStyle here
m = mpl.markers.MarkerStyle(m)
if m.is_filled():
kws.setdefault("edgecolor", "w")
# Draw the scatter plot
points = ax.scatter(x=x, y=y, **kws)
# Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
points.set_facecolors(self._hue_map(data["hue"]))
if "size" in self.variables:
points.set_sizes(self._size_map(data["size"]))
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Apply dependent default attributes
if "linewidth" not in kws:
sizes = points.get_sizes()
points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/relational.py#L542-L600
| 26 |
[
0,
1,
2,
3,
4,
5,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58
] | 96.610169 |
[
6
] | 1.694915 | false | 99.698795 | 59 | 12 | 98.305085 | 0 |
def plot(self, ax, kws):
# --- Determine the visual attributes of the plot
data = self.plot_data.dropna()
if data.empty:
return
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# Conditionally set the marker edgecolor based on whether the marker is "filled"
# See https://github.com/matplotlib/matplotlib/issues/17849 for context
m = kws.get("marker", mpl.rcParams.get("marker", "o"))
if not isinstance(m, mpl.markers.MarkerStyle):
# TODO in more recent matplotlib (which?) can pass a MarkerStyle here
m = mpl.markers.MarkerStyle(m)
if m.is_filled():
kws.setdefault("edgecolor", "w")
# Draw the scatter plot
points = ax.scatter(x=x, y=y, **kws)
# Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
points.set_facecolors(self._hue_map(data["hue"]))
if "size" in self.variables:
points.set_sizes(self._size_map(data["size"]))
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Apply dependent default attributes
if "linewidth" not in kws:
sizes = points.get_sizes()
points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
| 18,952 |
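A sketch exercising the size mapping and the dependent linewidth default in `plot` above: when `linewidth` is not passed, the edge width becomes `.08 * sqrt` of the 10th-percentile point area (data invented):

import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [2, 1, 4, 3],
                   "weight": [10, 20, 30, 40]})
ax = sns.scatterplot(data=df, x="x", y="y", size="weight", sizes=(20, 200))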
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
pairplot
|
(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
)
|
return grid
|
Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
variable in ``data`` will be shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
|
Plot pairwise relationships in a dataset.
| 2,005 | 2,176 |
def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
    variable in ``data`` will be shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
f"'data' must be pandas DataFrame object, not: {type(data)}")
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError("markers must be a singleton or a list of "
"markers for each level of the hue variable")
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L2005-L2176
| 26 |
[
0,
82,
83,
84,
85,
86,
91,
92,
95,
96,
97,
98,
99,
100,
101,
102,
103,
105,
106,
107,
108,
109,
114,
115,
116,
117,
118,
121,
122,
124,
127,
128,
129,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
150,
151,
152,
153,
154,
155,
156,
157,
158,
159,
160,
161,
162,
163,
164,
165,
166,
167,
168,
169,
170,
171
] | 41.860465 |
[
87,
88,
90,
93,
119,
123,
125,
130,
149
] | 5.232558 | false | 96.911197 | 172 | 21 | 94.767442 | 72 |
def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
f"'data' must be pandas DataFrame object, not: {type(data)}")
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError("markers must be a singleton or a list of "
"markers for each level of the hue variable")
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
| 18,953 |
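A minimal `pairplot` sketch: with `hue` set and the default `kind="scatter"`, the `diag_kind="auto"` branch above resolves to KDE diagonals (data invented):

import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame({"a": rng.normal(size=40),
                   "b": rng.normal(size=40),
                   "grp": ["x", "y"] * 20})
g = sns.pairplot(df, hue="grp", corner=True)  # corner drops the upper triangle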
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
jointplot
|
(
data=None, *, x=None, y=None, hue=None, kind="scatter",
height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
**kwargs
)
|
return grid
| 2,179 | 2,339 |
def jointplot(
data=None, *, x=None, y=None, hue=None, kind="scatter",
height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
**kwargs
):
# Avoid circular imports
from .relational import scatterplot
from .regression import regplot, residplot
from .distributions import histplot, kdeplot, _freedman_diaconis_bins
if kwargs.pop("ax", None) is not None:
msg = "Ignoring `ax`; jointplot is a figure-level function."
warnings.warn(msg, UserWarning, stacklevel=2)
# Set up empty default kwarg dicts
joint_kws = {} if joint_kws is None else joint_kws.copy()
joint_kws.update(kwargs)
marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
# Handle deprecations of distplot-specific kwargs
distplot_keys = [
"rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
]
unused_keys = []
for key in distplot_keys:
if key in marginal_kws:
unused_keys.append(key)
marginal_kws.pop(key)
if unused_keys and kind != "kde":
msg = (
"The marginal plotting function has changed to `histplot`,"
" which does not accept the following argument(s): {}."
).format(", ".join(unused_keys))
warnings.warn(msg, UserWarning)
# Validate the plot kind
plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
_check_argument("kind", plot_kinds, kind)
# Raise early if using `hue` with a kind that does not support it
if hue is not None and kind in ["hex", "reg", "resid"]:
msg = (
f"Use of `hue` with `kind='{kind}'` is not currently supported."
)
raise ValueError(msg)
# Make a colormap based off the plot color
# (Currently used only for kind="hex")
if color is None:
color = "C0"
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [utils.set_hls_values(color_rgb, l=l) # noqa
for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Matplotlib's hexbin plot is not na-robust
if kind == "hex":
dropna = True
# Initialize the JointGrid object
grid = JointGrid(
data=data, x=x, y=y, hue=hue,
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
dropna=dropna, height=height, ratio=ratio, space=space,
xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
)
if grid.hue is not None:
marginal_kws.setdefault("legend", False)
# Plot the data using the grid
if kind.startswith("scatter"):
joint_kws.setdefault("color", color)
grid.plot_joint(scatterplot, **joint_kws)
if grid.hue is None:
marg_func = histplot
else:
marg_func = kdeplot
marginal_kws.setdefault("warn_singular", False)
marginal_kws.setdefault("fill", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(marg_func, **marginal_kws)
elif kind.startswith("hist"):
# TODO process pair parameters for bins, etc. and pass
# to both joint and marginal plots
joint_kws.setdefault("color", color)
grid.plot_joint(histplot, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
marg_x_kws = marginal_kws.copy()
marg_y_kws = marginal_kws.copy()
pair_keys = "bins", "binwidth", "binrange"
for key in pair_keys:
if isinstance(joint_kws.get(key), tuple):
x_val, y_val = joint_kws[key]
marg_x_kws.setdefault(key, x_val)
marg_y_kws.setdefault(key, y_val)
histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
elif kind.startswith("kde"):
joint_kws.setdefault("color", color)
joint_kws.setdefault("warn_singular", False)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("color", color)
if "fill" in joint_kws:
marginal_kws.setdefault("fill", joint_kws["fill"])
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = min(_freedman_diaconis_bins(grid.x), 50)
y_bins = min(_freedman_diaconis_bins(grid.y), 50)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(histplot, **marginal_kws)
elif kind.startswith("reg"):
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", True)
grid.plot_marginals(histplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
# Make the main axes active in the matplotlib state machine
plt.sca(grid.ax_joint)
return grid
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L2179-L2339
| 26 |
[
0,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
25,
26,
27,
28,
29,
30,
31,
35,
36,
37,
38,
39,
40,
41,
42,
43,
46,
47,
48,
49,
50,
51,
52,
53,
55,
56,
57,
58,
59,
60,
61,
62,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150,
151,
152,
153,
154,
155,
156,
157,
158,
159,
160
] | 86.956522 |
[
120
] | 0.621118 | false | 96.911197 | 161 | 22 | 99.378882 | 0 |
def jointplot(
data=None, *, x=None, y=None, hue=None, kind="scatter",
height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
**kwargs
):
# Avoid circular imports
from .relational import scatterplot
from .regression import regplot, residplot
from .distributions import histplot, kdeplot, _freedman_diaconis_bins
if kwargs.pop("ax", None) is not None:
msg = "Ignoring `ax`; jointplot is a figure-level function."
warnings.warn(msg, UserWarning, stacklevel=2)
# Set up empty default kwarg dicts
joint_kws = {} if joint_kws is None else joint_kws.copy()
joint_kws.update(kwargs)
marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
# Handle deprecations of distplot-specific kwargs
distplot_keys = [
"rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
]
unused_keys = []
for key in distplot_keys:
if key in marginal_kws:
unused_keys.append(key)
marginal_kws.pop(key)
if unused_keys and kind != "kde":
msg = (
"The marginal plotting function has changed to `histplot`,"
" which does not accept the following argument(s): {}."
).format(", ".join(unused_keys))
warnings.warn(msg, UserWarning)
# Validate the plot kind
plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
_check_argument("kind", plot_kinds, kind)
# Raise early if using `hue` with a kind that does not support it
if hue is not None and kind in ["hex", "reg", "resid"]:
msg = (
f"Use of `hue` with `kind='{kind}'` is not currently supported."
)
raise ValueError(msg)
# Make a colormap based off the plot color
# (Currently used only for kind="hex")
if color is None:
color = "C0"
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [utils.set_hls_values(color_rgb, l=l) # noqa
for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Matplotlib's hexbin plot is not na-robust
if kind == "hex":
dropna = True
# Initialize the JointGrid object
grid = JointGrid(
data=data, x=x, y=y, hue=hue,
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
dropna=dropna, height=height, ratio=ratio, space=space,
xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
)
if grid.hue is not None:
marginal_kws.setdefault("legend", False)
# Plot the data using the grid
if kind.startswith("scatter"):
joint_kws.setdefault("color", color)
grid.plot_joint(scatterplot, **joint_kws)
if grid.hue is None:
marg_func = histplot
else:
marg_func = kdeplot
marginal_kws.setdefault("warn_singular", False)
marginal_kws.setdefault("fill", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(marg_func, **marginal_kws)
elif kind.startswith("hist"):
# TODO process pair parameters for bins, etc. and pass
# to both joint and marginal plots
joint_kws.setdefault("color", color)
grid.plot_joint(histplot, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
marg_x_kws = marginal_kws.copy()
marg_y_kws = marginal_kws.copy()
pair_keys = "bins", "binwidth", "binrange"
for key in pair_keys:
if isinstance(joint_kws.get(key), tuple):
x_val, y_val = joint_kws[key]
marg_x_kws.setdefault(key, x_val)
marg_y_kws.setdefault(key, y_val)
histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
elif kind.startswith("kde"):
joint_kws.setdefault("color", color)
joint_kws.setdefault("warn_singular", False)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("color", color)
if "fill" in joint_kws:
marginal_kws.setdefault("fill", joint_kws["fill"])
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = min(_freedman_diaconis_bins(grid.x), 50)
y_bins = min(_freedman_diaconis_bins(grid.y), 50)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(histplot, **marginal_kws)
elif kind.startswith("reg"):
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", True)
grid.plot_marginals(histplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
# Make the main axes active in the matplotlib state machine
plt.sca(grid.ax_joint)
return grid
| 18,954 |
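A minimal usage sketch for the jointplot record above, assuming seaborn's bundled "tips" sample data is reachable through sns.load_dataset (it is fetched on first use):

import seaborn as sns

tips = sns.load_dataset("tips")  # assumes the sample dataset can be downloaded
# kind="hex" exercises the _freedman_diaconis_bins/gridsize branch shown above
g = sns.jointplot(data=tips, x="total_bill", y="tip", kind="hex")
g.savefig("joint_hex.png")  # _BaseGrid.savefig, bbox_inches="tight" by default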
||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.set
|
(self, **kwargs)
|
return self
|
Set attributes on each subplot Axes.
|
Set attributes on each subplot Axes.
| 35 | 40 |
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L35-L40
| 26 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.911197 | 6 | 3 | 100 | 1 |
def set(self, **kwargs):
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
| 18,955 |
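Since set() simply forwards its kwargs to Axes.set on every non-removed subplot, any Axes property name is accepted; a hedged sketch using the same sample data:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="time")
g.map(sns.histplot, "total_bill")
g.set(xlim=(0, 60), ylabel="count")  # applied to each facet's Axes in turn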
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.fig
|
(self)
|
return self._figure
|
DEPRECATED: prefer the `figure` property.
|
DEPRECATED: prefer the `figure` property.
| 43 | 49 |
def fig(self):
"""DEPRECATED: prefer the `figure` property."""
# Grid.figure is preferred because it matches the Axes attribute name.
# But as the maintenance burden of having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L43-L49
| 26 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 96.911197 | 7 | 1 | 100 | 1 |
def fig(self):
# Grid.figure is preferred because it matches the Axes attribute name.
# But as the maintenance burden of having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
| 18,956 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.figure
|
(self)
|
return self._figure
|
Access the :class:`matplotlib.figure.Figure` object underlying the grid.
|
Access the :class:`matplotlib.figure.Figure` object underlying the grid.
| 52 | 54 |
def figure(self):
"""Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
return self._figure
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L52-L54
| 26 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 96.911197 | 3 | 1 | 100 | 1 |
def figure(self):
return self._figure
| 18,957 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.apply
|
(self, func, *args, **kwargs)
|
return self
|
Pass the grid to a user-supplied function and return self.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` is ignored; this method returns self.
See the `pipe` method if you want the return value.
Added in v0.12.0.
|
Pass the grid to a user-supplied function and return self.
| 56 | 69 |
def apply(self, func, *args, **kwargs):
"""
Pass the grid to a user-supplied function and return self.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` is ignored; this method returns self.
See the `pipe` method if you want the return value.
Added in v0.12.0.
"""
func(self, *args, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L56-L69
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13
] | 100 |
[] | 0 | true | 96.911197 | 14 | 1 | 100 | 8 |
def apply(self, func, *args, **kwargs):
func(self, *args, **kwargs)
return self
| 18,958 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.pipe
|
(self, func, *args, **kwargs)
|
return func(self, *args, **kwargs)
|
Pass the grid to a user-supplied function and return its value.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` becomes the return value of this method.
See the `apply` method if you want to return self instead.
Added in v0.12.0.
|
Pass the grid to a user-supplied function and return its value.
| 71 | 83 |
def pipe(self, func, *args, **kwargs):
"""
Pass the grid to a user-supplied function and return its value.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` becomes the return value of this method.
See the `apply` method if you want to return self instead.
Added in v0.12.0.
"""
return func(self, *args, **kwargs)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L71-L83
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 100 |
[] | 0 | true | 96.911197 | 13 | 1 | 100 | 8 |
def pipe(self, func, *args, **kwargs):
return func(self, *args, **kwargs)
| 18,959 |
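apply() and pipe() differ only in what they return: apply() discards the callable's result and returns the grid for chaining, while pipe() returns the result itself. A sketch with a hypothetical label_panels helper:

import seaborn as sns

def label_panels(grid):
    # hypothetical helper: stamp a letter onto each subplot title
    for letter, ax in zip("abcd", grid.axes.flat):
        ax.set_title(f"({letter})")

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="smoker")
g.map(sns.histplot, "tip")
g.apply(label_panels)                    # returns the grid itself
fig = g.pipe(lambda grid: grid.figure)   # returns the callable's result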
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
_BaseGrid.savefig
|
(self, *args, **kwargs)
|
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
|
Save an image of the plot.
| 85 | 95 |
def savefig(self, *args, **kwargs):
"""
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
"""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L85-L95
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 72.727273 |
[
8,
9,
10
] | 27.272727 | false | 96.911197 | 11 | 1 | 72.727273 | 4 |
def savefig(self, *args, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
| 18,960 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid.__init__
|
(self)
| 103 | 110 |
def __init__(self):
self._tight_layout_rect = [0, 0, 1, 1]
self._tight_layout_pad = None
# This attribute is set externally and is a hack to handle newer functions that
# don't add proxy artists onto the Axes. We need an overall cleaner approach.
self._extract_legend_handles = False
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L103-L110
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 100 |
[] | 0 | true | 96.911197 | 8 | 1 | 100 | 0 |
def __init__(self):
self._tight_layout_rect = [0, 0, 1, 1]
self._tight_layout_pad = None
# This attribute is set externally and is a hack to handle newer functions that
# don't add proxy artists onto the Axes. We need an overall cleaner approach.
self._extract_legend_handles = False
| 18,961 |
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid.tight_layout
|
(self, *args, **kwargs)
|
return self
|
Call fig.tight_layout within a rect that excludes the legend.
|
Call fig.tight_layout within a rect that excludes the legend.
| 112 | 119 |
def tight_layout(self, *args, **kwargs):
"""Call fig.tight_layout within rect that exclude the legend."""
kwargs = kwargs.copy()
kwargs.setdefault("rect", self._tight_layout_rect)
if self._tight_layout_pad is not None:
kwargs.setdefault("pad", self._tight_layout_pad)
self._figure.tight_layout(*args, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L112-L119
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 100 |
[] | 0 | true | 96.911197 | 8 | 2 | 100 | 1 |
def tight_layout(self, *args, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault("rect", self._tight_layout_rect)
if self._tight_layout_pad is not None:
kwargs.setdefault("pad", self._tight_layout_pad)
self._figure.tight_layout(*args, **kwargs)
return self
| 18,962 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid.add_legend
|
(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs)
|
return self
|
Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
|
Draw a legend, maybe placing it outside axes and resizing the figure.
| 121 | 223 |
def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L121-L223
| 26 |
[
0,
28,
29,
30,
31,
32,
33,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102
] | 70.873786 |
[] | 0 | false | 96.911197 | 103 | 10 | 100 | 24 |
def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
| 18,963 |
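With legend_out=True (the FacetGrid default), add_legend() draws a figure-level legend and widens the figure to make room, following the _legend_out branch above; a minimal sketch:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="time", hue="sex")
g.map(sns.scatterplot, "total_bill", "tip")
g.add_legend(title="sex")  # resizes the figure so the legend fits on the right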
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid._update_legend_data
|
(self, ax)
|
Extract the legend data from an axes object and save it.
|
Extract the legend data from an axes object and save it.
| 225 | 242 |
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
data = {}
# Get data directly from the legend, which is necessary
# for newer functions that don't add labeled proxy artists
if ax.legend_ is not None and self._extract_legend_handles:
handles = ax.legend_.legendHandles
labels = [t.get_text() for t in ax.legend_.texts]
data.update({l: h for h, l in zip(handles, labels)})
handles, labels = ax.get_legend_handles_labels()
data.update({l: h for h, l in zip(handles, labels)})
self._legend_data.update(data)
# Now clear the legend
ax.legend_ = None
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L225-L242
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 100 |
[] | 0 | true | 96.911197 | 18 | 4 | 100 | 1 |
def _update_legend_data(self, ax):
data = {}
# Get data directly from the legend, which is necessary
# for newer functions that don't add labeled proxy artists
if ax.legend_ is not None and self._extract_legend_handles:
handles = ax.legend_.legendHandles
labels = [t.get_text() for t in ax.legend_.texts]
data.update({l: h for h, l in zip(handles, labels)})
handles, labels = ax.get_legend_handles_labels()
data.update({l: h for h, l in zip(handles, labels)})
self._legend_data.update(data)
# Now clear the legend
ax.legend_ = None
| 18,964 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid._get_palette
|
(self, data, hue, hue_order, palette)
|
return palette
|
Get a list of colors for the hue variable.
|
Get a list of colors for the hue variable.
| 244 | 272 |
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = utils.get_color_cycle()
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L244-L272
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28
] | 100 |
[] | 0 | true | 96.911197 | 29 | 6 | 100 | 1 |
def _get_palette(self, data, hue, hue_order, palette):
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = utils.get_color_cycle()
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
| 18,965 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid.legend
|
(self)
|
The :class:`matplotlib.legend.Legend` object, if present.
|
The :class:`matplotlib.legend.Legend` object, if present.
| 275 | 280 |
def legend(self):
"""The :class:`matplotlib.legend.Legend` object, if present."""
try:
return self._legend
except AttributeError:
return None
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L275-L280
| 26 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.911197 | 6 | 2 | 100 | 1 |
def legend(self):
try:
return self._legend
except AttributeError:
return None
| 18,966 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
Grid.tick_params
|
(self, axis='both', **kwargs)
|
return self
|
Modify the ticks, tick labels, and gridlines.
Parameters
----------
axis : {'x', 'y', 'both'}
The axis on which to apply the formatting.
kwargs : keyword arguments
Additional keyword arguments to pass to
:meth:`matplotlib.axes.Axes.tick_params`.
Returns
-------
self : Grid instance
Returns self for easy chaining.
|
Modify the ticks, tick labels, and gridlines.
| 282 | 301 |
def tick_params(self, axis='both', **kwargs):
"""Modify the ticks, tick labels, and gridlines.
Parameters
----------
axis : {'x', 'y', 'both'}
The axis on which to apply the formatting.
kwargs : keyword arguments
Additional keyword arguments to pass to
:meth:`matplotlib.axes.Axes.tick_params`.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
for ax in self.figure.axes:
ax.tick_params(axis=axis, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L282-L301
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19
] | 100 |
[] | 0 | true | 96.911197 | 20 | 2 | 100 | 14 |
def tick_params(self, axis='both', **kwargs):
for ax in self.figure.axes:
ax.tick_params(axis=axis, **kwargs)
return self
| 18,967 |
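tick_params() is forwarded verbatim to Axes.tick_params on every axes in the figure; a sketch:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="day", col_wrap=2)
g.map(sns.histplot, "total_bill")
g.tick_params(axis="x", labelrotation=45, labelsize=8)  # all facets at once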
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.__init__
|
(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None,
)
| 366 | 543 |
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None,
):
super().__init__()
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(col_names) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
# TODO this doesn't account for axis labels
figsize = (ncol * height * aspect, nrow * height)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# --- Initialize the subplot grid
with _disable_autolayout():
fig = plt.figure(figsize=figsize)
if col_wrap is None:
kwargs = dict(squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
axes = fig.subplots(nrow, ncol, **kwargs)
if col is None and row is None:
axes_dict = {}
elif col is None:
axes_dict = dict(zip(row_names, axes.flat))
elif row is None:
axes_dict = dict(zip(col_names, axes.flat))
else:
facet_product = product(row_names, col_names)
axes_dict = dict(zip(facet_product, axes.flat))
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
axes_dict = dict(zip(col_names, axes))
# --- Set up the class attributes
# Attributes that are part of the public API but accessed through
# a property so that Sphinx adds them to the auto class doc
self._figure = fig
self._axes = axes
self._axes_dict = axes_dict
self._legend = None
# Public attributes that aren't explicitly documented
# (It's not obvious that having them be public was a good idea)
self.data = data
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._margin_titles_texts = []
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend_data = {}
self._x_var = None
self._y_var = None
self._sharex = sharex
self._sharey = sharey
self._dropna = dropna
self._not_na = not_na
# --- Make the axes look good
self.set_titles()
self.tight_layout()
if despine:
self.despine()
if sharex in [True, 'col']:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
if sharey in [True, 'row']:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L366-L543
| 26 |
[
0,
9,
10,
11,
12,
13,
14,
15,
17,
18,
19,
20,
21,
22,
23,
25,
26,
27,
28,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
89,
90,
91,
92,
93,
94,
95,
96,
97,
99,
100,
101,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150,
151,
152,
153,
154,
155,
156,
157,
158,
159,
160,
161,
162,
163,
164,
165,
166,
167,
168,
169,
170,
171,
172,
173,
174,
175,
176,
177
] | 89.88764 |
[] | 0 | false | 96.911197 | 178 | 27 | 100 | 0 |
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None,
):
super().__init__()
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(col_names) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
# TODO this doesn't account for axis labels
figsize = (ncol * height * aspect, nrow * height)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# --- Initialize the subplot grid
with _disable_autolayout():
fig = plt.figure(figsize=figsize)
if col_wrap is None:
kwargs = dict(squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
axes = fig.subplots(nrow, ncol, **kwargs)
if col is None and row is None:
axes_dict = {}
elif col is None:
axes_dict = dict(zip(row_names, axes.flat))
elif row is None:
axes_dict = dict(zip(col_names, axes.flat))
else:
facet_product = product(row_names, col_names)
axes_dict = dict(zip(facet_product, axes.flat))
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
axes_dict = dict(zip(col_names, axes))
# --- Set up the class attributes
# Attributes that are part of the public API but accessed through
# a property so that Sphinx adds them to the auto class doc
self._figure = fig
self._axes = axes
self._axes_dict = axes_dict
self._legend = None
# Public attributes that aren't explicitly documented
# (It's not obvious that having them be public was a good idea)
self.data = data
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._margin_titles_texts = []
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend_data = {}
self._x_var = None
self._y_var = None
self._sharex = sharex
self._sharey = sharey
self._dropna = dropna
self._not_na = not_na
# --- Make the axes look good
self.set_titles()
self.tight_layout()
if despine:
self.despine()
if sharex in [True, 'col']:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
if sharey in [True, 'row']:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
| 18,968 |
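A construction sketch covering the main layout knobs above; note that col_wrap cannot be combined with row (the ValueError branch), and that margin_titles is silently disabled when col_wrap is set:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(
    tips, row="sex", col="time",
    height=2.5, aspect=1.2, margin_titles=True,
)
g.map(sns.scatterplot, "total_bill", "tip")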
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.facet_data
|
(self)
|
Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
|
Generator for name indices and data subsets for each facet.
| 637 | 675 |
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L637-L675
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38
] | 100 |
[] | 0 | true | 96.911197 | 39 | 8 | 100 | 10 |
def facet_data(self):
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
| 18,969 |
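The generator can be used directly to inspect how the data is partitioned; each (i, j, k) triple indexes row_names, col_names, and hue_names respectively:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="time", hue="smoker")
for (i, j, k), data_ijk in g.facet_data():
    print(i, j, k, len(data_ijk))  # facet indices and subset size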
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.map
|
(self, func, *args, **kwargs)
|
return self
|
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
|
Apply a plotting function to each facet's subset of the data.
| 677 | 757 |
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# How we use the function depends on where it comes from
func_module = str(getattr(func, "__module__", ""))
# Check for categorical plots without order information
if func_module == "seaborn.categorical":
if "order" not in kwargs:
warning = ("Using the {} function without specifying "
"`order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
if len(args) == 3 and "hue_order" not in kwargs:
warning = ("Using the {} function without specifying "
"`hue_order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not func_module.startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.items()]
# Some matplotlib functions don't handle pandas objects correctly
if func_module.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L677-L757
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80
] | 92.592593 |
[
37,
40,
67
] | 3.703704 | false | 96.911197 | 81 | 13 | 96.296296 | 20 |
def map(self, func, *args, **kwargs):
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# How we use the function depends on where it comes from
func_module = str(getattr(func, "__module__", ""))
# Check for categorical plots without order information
if func_module == "seaborn.categorical":
if "order" not in kwargs:
warning = ("Using the {} function without specifying "
"`order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
if len(args) == 3 and "hue_order" not in kwargs:
warning = ("Using the {} function without specifying "
"`hue_order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not func_module.startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.items()]
# Some matplotlib functions don't handle pandas objects correctly
if func_module.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
| 18,970 |
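A sketch of the plotting paths in map(): positional strings name columns of g.data, matplotlib callables receive plain arrays (the func_module.startswith("matplotlib") branch), and seaborn callables get them as x/y keywords via _facet_plot:

import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="time", hue="sex")
g.map(plt.scatter, "total_bill", "tip", alpha=.6)
g.add_legend()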
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.map_dataframe
|
(self, func, *args, **kwargs)
|
return self
|
Like ``.map`` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
|
Like ``.map`` but passes args as strings and inserts data in kwargs.
| 759 | 828 |
def map_dataframe(self, func, *args, **kwargs):
"""Like ``.map`` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not str(func.__module__).startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# For axis labels, prefer to use positional args for backcompat
# but also extract the x/y kwargs and use if no corresponding arg
axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
for i, val in enumerate(args[:2]):
axis_labels[i] = val
self._finalize_grid(axis_labels)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L759-L828
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69
] | 98.571429 |
[
56
] | 1.428571 | false | 96.911197 | 70 | 7 | 98.571429 | 25 |
def map_dataframe(self, func, *args, **kwargs):
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not str(func.__module__).startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# For axis labels, prefer to use positional args for backcompat
# but also extract the x/y kwargs and use if no corresponding arg
axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
for i, val in enumerate(args[:2]):
axis_labels[i] = val
self._finalize_grid(axis_labels)
return self
| 18,971 |
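map_dataframe() injects the facet subset as a data= keyword, so variables can be named with keyword arguments rather than positionally; a sketch:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, col="time")
g.map_dataframe(sns.histplot, x="total_bill", binwidth=5)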
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid._facet_color
|
(self, hue_index, kw_color)
| 830 | 836 |
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L830-L836
| 26 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 96.911197 | 7 | 3 | 100 | 0 |
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
| 18,972 |
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid._facet_plot
|
(self, func, ax, plot_args, plot_kwargs)
| 838 | 851 |
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L838-L851
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13
] | 100 |
[] | 0 | true | 96.911197 | 14 | 3 | 100 | 0 |
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
| 18,973 |
|||
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid._finalize_grid
|
(self, axlabels)
|
Finalize the annotations and layout.
|
Finalize the annotations and layout.
| 853 | 856 |
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.tight_layout()
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L853-L856
| 26 |
[
0,
1,
2,
3
] | 100 |
[] | 0 | true | 96.911197 | 4 | 1 | 100 | 1 |
def _finalize_grid(self, axlabels):
self.set_axis_labels(*axlabels)
self.tight_layout()
| 18,974 |
|
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.facet_axis
|
(self, row_i, col_j, modify_state=True)
|
return ax
|
Make the axis identified by these indices active and return it.
|
Make the axis identified by these indices active and return it.
| 858 | 870 |
def facet_axis(self, row_i, col_j, modify_state=True):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L858-L870
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 100 |
[] | 0 | true | 96.911197 | 13 | 3 | 100 | 1 |
def facet_axis(self, row_i, col_j, modify_state=True):
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
| 18,975 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.despine
|
(self, **kwargs)
|
return self
|
Remove axis spines from the facets.
|
Remove axis spines from the facets.
| 872 | 875 |
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self._figure, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L872-L875
| 26 |
[
0,
1,
2,
3
] | 100 |
[] | 0 | true | 96.911197 | 4 | 1 | 100 | 1 |
def despine(self, **kwargs):
utils.despine(self._figure, **kwargs)
return self
| 18,976 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_axis_labels
|
(self, x_var=None, y_var=None, clear_inner=True, **kwargs)
|
return self
|
Set axis labels on the left column and bottom row of the grid.
|
Set axis labels on the left column and bottom row of the grid.
| 877 | 886 |
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L877-L886
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 96.911197 | 10 | 3 | 100 | 1 |
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
return self
| 18,977 |
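set_axis_labels() labels the bottom row and left column and, with clear_inner=True (the default), blanks the interior labels; a sketch:

import seaborn as sns

tips = sns.load_dataset("tips")  # assumed available
g = sns.FacetGrid(tips, row="sex", col="time")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.set_axis_labels("total bill ($)", "tip ($)")  # outer facets only by default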
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_xlabels
|
(self, label=None, clear_inner=True, **kwargs)
|
return self
|
Label the x axis on the bottom row of the grid.
|
Label the x axis on the bottom row of the grid.
| 888 | 897 |
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L888-L897
| 26 |
[
0,
1,
2,
4,
5,
6,
7,
8,
9
] | 90 |
[
3
] | 10 | false | 96.911197 | 10 | 5 | 90 | 1 |
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
| 18,978 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_ylabels
|
(self, label=None, clear_inner=True, **kwargs)
|
return self
|
Label the y axis on the left column of the grid.
|
Label the y axis on the left column of the grid.
| 899 | 908 |
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L899-L908
| 26 |
[
0,
1,
2,
4,
5,
6,
7,
8,
9
] | 90 |
[
3
] | 10 | false | 96.911197 | 10 | 5 | 90 | 1 |
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
| 18,979 |
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_xticklabels
|
(self, labels=None, step=None, **kwargs)
|
return self
|
Set x axis tick labels of the grid.
|
Set x axis tick labels of the grid.
| 910 | 924 |
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L910-L924
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 100 |
[] | 0 | true | 96.911197 | 15 | 5 | 100 | 1 |
def set_xticklabels(self, labels=None, step=None, **kwargs):
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
| 18,980 |
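A sketch of the `labels=None` branch in the record above: the current tick labels are re-applied, optionally thinned with `step`, and kwargs reach `Axes.set_xticklabels` as text properties; the grid itself is an assumption:

import seaborn as sns

g = sns.FacetGrid(sns.load_dataset("tips"), col="day")
g.map(sns.histplot, "total_bill")
g.set_xticklabels(rotation=45)  # keep the current labels, rotate them
g.set_xticklabels(step=2)       # or keep only every second tick label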
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_yticklabels
|
(self, labels=None, **kwargs)
|
return self
|
Set y axis tick labels on the left column of the grid.
|
Set y axis tick labels on the left column of the grid.
| 926 | 936 |
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L926-L936
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 100 |
[] | 0 | true | 96.911197 | 11 | 4 | 100 | 1 |
def set_yticklabels(self, labels=None, **kwargs):
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
| 18,981 |
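The y-axis counterpart, which has no `step` option; a minimal assumed-grid sketch:

import seaborn as sns

g = sns.FacetGrid(sns.load_dataset("tips"), col="day")
g.map(sns.histplot, "total_bill")
g.set_yticklabels(fontsize=8)  # re-set current labels with new text properties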
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.set_titles
|
(self, template=None, row_template=None, col_template=None,
**kwargs)
|
return self
|
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
|
Draw titles either above each facet or on the grid margins.
| 938 | 1,028 |
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L938-L1028
| 26 |
[
0,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90
] | 76.923077 |
[] | 0 | false | 96.911197 | 91 | 22 | 100 | 19 |
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
| 18,982 |
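A sketch of the two title modes documented above; dataset, level names, and templates are illustrative assumptions:

import seaborn as sns

# Per-facet titles: with both row and col set, the default template is
# "{row_var} = {row_name} | {col_var} = {col_name}"
g = sns.FacetGrid(sns.load_dataset("tips"), row="sex", col="time")
g.map(sns.scatterplot, "total_bill", "tip")
g.set_titles(template="{row_name} / {col_name}")

# Margin titles: row/col templates are drawn on the grid edges instead
g2 = sns.FacetGrid(sns.load_dataset("tips"), row="sex", col="time",
                   margin_titles=True)
g2.map(sns.scatterplot, "total_bill", "tip")
g2.set_titles(row_template="{row_name}", col_template="{col_name}")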
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.refline
|
(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws)
|
return self
|
Add a reference line(s) to each facet.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s). Pass ``color=None`` to
use ``hue`` mapping.
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`FacetGrid` instance
Returns ``self`` for easy method chaining.
|
Add a reference line(s) to each facet.
| 1,030 | 1,062 |
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
"""Add a reference line(s) to each facet.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s). Pass ``color=None`` to
use ``hue`` mapping.
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`FacetGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
self.map(plt.axvline, x=x, **line_kws)
if y is not None:
self.map(plt.axhline, y=y, **line_kws)
return self
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L1030-L1062
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32
] | 100 |
[] | 0 | true | 96.911197 | 33 | 3 | 100 | 20 |
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
self.map(plt.axvline, x=x, **line_kws)
if y is not None:
self.map(plt.axhline, y=y, **line_kws)
return self
| 18,983 |
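A short sketch of `refline` following the parameters above; the medians are computed from the assumed demo data:

import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="time")
g.map(sns.scatterplot, "total_bill", "tip")
# Draws one dashed grey vertical and one horizontal line on every facet
g.refline(x=tips["total_bill"].median(), y=tips["tip"].median())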
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.axes
|
(self)
|
return self._axes
|
An array of the :class:`matplotlib.axes.Axes` objects in the grid.
|
An array of the :class:`matplotlib.axes.Axes` objects in the grid.
| 1,067 | 1,069 |
def axes(self):
"""An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
return self._axes
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L1067-L1069
| 26 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 96.911197 | 3 | 1 | 100 | 1 |
def axes(self):
return self._axes
| 18,984 |
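A one-line use of the `.axes` array from the record above; its shape follows the (row, col) layout of the assumed grid:

import seaborn as sns

g = sns.FacetGrid(sns.load_dataset("tips"), row="sex", col="time")
g.map(sns.histplot, "tip")
for ax in g.axes.flat:  # 2x2 array of matplotlib Axes
    ax.grid(True, alpha=0.3)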
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.ax
|
(self)
|
The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.
|
The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.
| 1,072 | 1,080 |
def ax(self):
"""The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
err = (
"Use the `.axes` attribute when facet variables are assigned."
)
raise AttributeError(err)
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L1072-L1080
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 100 |
[] | 0 | true | 96.911197 | 9 | 2 | 100 | 1 |
def ax(self):
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
err = (
"Use the `.axes` attribute when facet variables are assigned."
)
raise AttributeError(err)
| 18,985 |
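A sketch of the `.ax` accessor; it only resolves for a 1x1 grid, otherwise the AttributeError in the record is raised:

import seaborn as sns

g = sns.FacetGrid(sns.load_dataset("tips"))  # no row/col: single facet
g.map(sns.histplot, "tip")
g.ax.set_title("All tips")  # fine here; use g.axes when faceting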
mwaskom/seaborn
|
a47b97e4b98c809db55cbd283de21acba89fe186
|
seaborn/axisgrid.py
|
FacetGrid.axes_dict
|
(self)
|
return self._axes_dict
|
A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
If only one of ``row`` or ``col`` is assigned, each key is a string
representing a level of that variable. If both facet dimensions are
assigned, each key is a ``({row_level}, {col_level})`` tuple.
|
A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
| 1,083 | 1,091 |
def axes_dict(self):
"""A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
If only one of ``row`` or ``col`` is assigned, each key is a string
representing a level of that variable. If both facet dimensions are
assigned, each key is a ``({row_level}, {col_level})`` tuple.
"""
return self._axes_dict
|
https://github.com/mwaskom/seaborn/blob/a47b97e4b98c809db55cbd283de21acba89fe186/project26/seaborn/axisgrid.py#L1083-L1091
| 26 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 100 |
[] | 0 | true | 96.911197 | 9 | 1 | 100 | 5 |
def axes_dict(self):
return self._axes_dict
| 18,986 |
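A sketch of the keyed lookup described in the record; the level values ("Male", "Lunch") are assumptions tied to the demo data:

import seaborn as sns

g = sns.FacetGrid(sns.load_dataset("tips"), row="sex", col="time")
g.map(sns.histplot, "tip")
# Both facet dimensions assigned, so keys are (row_level, col_level) tuples
g.axes_dict[("Male", "Lunch")].set_facecolor("0.95")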