nwo string (len 10-28) | sha string (len 40) | path string (len 11-97) | identifier string (len 1-64) | parameters string (len 2-2.24k) | return_statement string (len 0-2.17k) | docstring string (len 0-5.45k) | docstring_summary string (len 0-3.83k) | func_begin int64 (1-13.4k) | func_end int64 (2-13.4k) | function string (len 28-56.4k) | url string (len 106-209) | project int64 (1-48) | executed_lines list | executed_lines_pc float64 (0-153) | missing_lines list | missing_lines_pc float64 (0-100) | covered bool (2 classes) | filecoverage float64 (2.53-100) | function_lines int64 (2-1.46k) | mccabe int64 (1-253) | coverage float64 (0-100) | docstring_lines int64 (0-112) | function_nodoc string (len 9-56.4k) | id int64 (0-29.8k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_boolean
|
(self, bval, state)
|
Encodes the Python boolean into a JSON Boolean literal.
|
Encodes the Python boolean into a JSON Boolean literal.
| 3,743 | 3,745 |
def encode_boolean(self, bval, state):
"""Encodes the Python boolean into a JSON Boolean literal."""
state.append('true' if bool(bval) else 'false')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L3743-L3745
| 25 |
[0, 1] | 66.666667 |
[2] | 33.333333 | false | 14.825334 | 3 | 1 | 66.666667 | 1 |
def encode_boolean(self, bval, state):
state.append('true' if bool(bval) else 'false')
| 18,539 |
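A minimal usage sketch for the row above (assumptions: the vendored copy imports as `akshare.utils.demjson` and exposes upstream demjson's module-level `encode()` helper):

```python
# Hypothetical quick check; encode_boolean is the leaf encoder that the
# top-level encode() eventually reaches for Python bools.
from akshare.utils import demjson

print(demjson.encode(True))    # true
print(demjson.encode(False))   # false
print(demjson.encode([True]))  # [true] -- nested values reach the same leaf encoder
```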
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode_number
|
(self, state)
|
Intermediate-level decoder for JSON numeric literals.
Takes a string and a starting index, and returns a suitable
Python numeric type and the index of the next unparsed character.
The returned numeric type can be a Python int, long, or
float. In addition, some special non-numbers may also be
returned, such as nan, inf, and neginf (technically Python
floats, but with no numeric value).
Ref. ECMAScript section 8.5.
|
Intermediate-level decoder for JSON numeric literals.
| 3,747 | 4,025 |
def decode_number(self, state):
"""Intermediate-level decoder for JSON numeric literals.
Takes a string and a starting index, and returns a Python
suitable numeric type and the index of the next unparsed character.
The returned numeric type can be either of a Python int,
long, or float. In addition some special non-numbers may
also be returned such as nan, inf, and neginf (technically
which are Python floats, but have no numeric value.)
Ref. ECMAScript section 8.5.
"""
buf = state.buf
self.skipws(state)
start_position = buf.position
# Use external number parser hook if available
if self.has_hook('decode_number') or self.has_hook('decode_float'):
c = buf.peek()
if c and c in '-+0123456789.': # First chars for a number-like value
buf.save_position()
nbr = buf.pop_while_in('-+0123456789abcdefABCDEF' 'NaN' 'Infinity.')
if '.' in nbr and self.has_hook('decode_float'):
hook_name = 'decode_float'
elif self.has_hook('decode_number'):
hook_name = 'decode_number'
else:
hook_name = None
if hook_name:
try:
val = self.call_hook(hook_name, nbr, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
val = undefined
else:
buf.clear_saved_position()
return val
# Hook didn't handle it, restore old position
buf.restore_position()
# Detect initial sign character(s)
sign = +1
sign_count = 0
sign_saw_plus = False
sign_saw_ws = False
c = buf.peek()
while c and c in '+-':
if c == '-':
sign = sign * -1
elif c == '+':
sign_saw_plus = True
sign_count += 1
buf.skip()
if self.skipws_nocomments(state) > 0:
sign_saw_ws = True
c = buf.peek()
if sign_count > 1 or sign_saw_plus:
state.push_cond(self.options.all_numeric_signs,
'Numbers may only have a single "-" as a sign prefix',
position=start_position)
if sign_saw_ws:
state.push_error('Spaces may not appear between a +/- number sign and the digits', position=start_position)
# Check for ECMAScript symbolic non-numbers
if not c:
state.push_error('Missing numeric value after sign', position=start_position)
self.recover_parser(state)
self.stats.num_undefineds += 1
return undefined
elif c.isalpha() or c in '_$':
kw = buf.popwhile(lambda c: c.isalnum() or c in '_$')
if kw == 'NaN':
state.push_cond(self.options.non_numbers,
'NaN literals are not allowed in strict JSON',
position=start_position)
state.stats.num_nans += 1
return self.options.nan
elif kw == 'Infinity':
state.push_cond(self.options.non_numbers,
'Infinity literals are not allowed in strict JSON',
position=start_position)
state.stats.num_infinities += 1
if sign < 0:
return self.options.neginf
else:
return self.options.inf
else:
state.push_error('Unknown numeric value keyword', kw, position=start_position)
return undefined
# Check for radix-prefixed numbers
elif c == '0' and (buf.peek(1) in ['x', 'X']):
# ----- HEX NUMBERS 0x123
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_hex_digit)
state.push_cond(self.options.hex_numbers,
'Hexadecimal literals are not allowed in strict JSON', prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error('Hexadecimal number is invalid', position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_hex(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_HEX)
return n
elif c == '0' and (buf.peek(1) in ['o', 'O']):
# ----- NEW-STYLE OCTAL NUMBERS 0o123
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_octal_digit)
state.push_cond(self.options.octal_numbers,
"Octal literals are not allowed in strict JSON", prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error("Octal number is invalid", position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_octal(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_OCTAL)
return n
elif c == '0' and (buf.peek(1) in ['b', 'B']):
# ----- NEW-STYLE BINARY NUMBERS 0b1101
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_binary_digit)
state.push_cond(self.options.binary_numbers,
"Binary literals are not allowed in strict JSON", prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error("Binary number is invalid", position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_binary(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_BINARY)
return n
else:
# ----- DECIMAL OR LEGACY-OCTAL NUMBER. 123, 0123
# General syntax is: \d+[\.\d+][e[+-]?\d+]
number = buf.popwhile(lambda c: c in '0123456789.+-eE')
imax = len(number)
if imax == 0:
state.push_error('Missing numeric value', position=start_position)
has_leading_zero = False
units_digits = [] # digits making up whole number portion
fraction_digits = [] # digits making up fractional portion
exponent_digits = [] # digits making up exponent portion (excluding sign)
esign = '+' # sign of exponent
sigdigits = 0 # number of significant digits (approximate)
saw_decimal_point = False
saw_exponent = False
# Break number into parts in a first pass...use a mini state machine
in_part = 'units'
for i, c in enumerate(number):
if c == '.':
if in_part != 'units':
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
in_part = 'fraction'
saw_decimal_point = True
elif c in 'eE':
if in_part == 'exponent':
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
in_part = 'exponent'
saw_exponent = True
elif c in '+-':
if in_part != 'exponent' or exponent_digits:
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
esign = c
else: # digit
if in_part == 'units':
units_digits.append(c)
elif in_part == 'fraction':
fraction_digits.append(c)
elif in_part == 'exponent':
exponent_digits.append(c)
units_s = ''.join(units_digits)
fraction_s = ''.join(fraction_digits)
exponent_s = ''.join(exponent_digits)
# Basic syntax rules checking
is_integer = not (saw_decimal_point or saw_exponent)
if not units_s and not fraction_s:
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
if saw_decimal_point and not fraction_s:
state.push_cond(self.options.trailing_decimal_point,
'Bad number, decimal point must be followed by at least one digit',
number, position=start_position)
fraction_s = '0'
if saw_exponent and not exponent_s:
state.push_error('Bad number, exponent is missing', number, position=start_position)
self.recover_parser(state)
return undefined
if not units_s:
state.push_cond(self.options.initial_decimal_point,
'Bad number, decimal point must be preceded by at least one digit',
number, position=start_position)
            units_s = '0'  # was "units", a likely typo; units_s is the name read below
elif len(units_s) > 1 and units_s[0] == '0':
has_leading_zero = True
if self.options.is_forbid_leading_zeros:
state.push_cond(self.options.leading_zeros,
'Numbers may not have extra leading zeros',
number, position=start_position)
elif self.options.is_warn_leading_zeros:
state.push_cond(self.options.leading_zeros,
'Numbers may not have leading zeros; interpreting as %s' \
% self.options.leading_zero_radix_as_word,
number, position=start_position)
# Estimate number of significant digits
sigdigits = len((units_s + fraction_s).replace('0', ' ').strip())
# Handle legacy octal integers.
if has_leading_zero and is_integer and self.options.leading_zero_radix == 8:
# ----- LEGACY-OCTAL 0123
try:
ival = helpers.decode_octal(units_s)
except ValueError:
state.push_error('Bad number, not a valid octal value', number, position=start_position)
self.recover_parser(state)
return self.options.nan # undefined
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_LEGACYOCTAL)
return n
# Determine the exponential part
if exponent_s:
try:
exponent = int(exponent_s)
except ValueError:
state.push_error('Bad number, bad exponent', number, position=start_position)
self.recover_parser(state)
return undefined
if esign == '-':
exponent = - exponent
else:
exponent = 0
# Try to make an int/long first.
if not saw_decimal_point and exponent >= 0:
# ----- A DECIMAL INTEGER
ival = int(units_s)
if exponent != 0:
ival *= 10 ** exponent
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign)
else:
# ----- A FLOATING-POINT NUMBER
try:
if exponent < float_minexp or exponent > float_maxexp or sigdigits > float_sigdigits:
n = state.options.make_decimal(number, sign)
else:
n = state.options.make_float(number, sign)
except ValueError as err:
                state.push_error('Bad number, %s' % str(err), number, position=start_position)  # str(err): exceptions have no .message in Python 3
n = undefined
else:
state.update_float_stats(n, sign=sign, position=start_position)
return n
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L3747-L4025
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] | 5.017921 |
[14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31, 32, 33, 34, 35, 36, 37,
38, 40, 41, 43, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62,
63, 66, 67, 70, 71, 72, 73, 74, 75, 76, 77, 78, 81, 82, 83, 84, 87, 88, 89, 91,
93, 94, 97, 99, 100, 101, 104, 105, 106, 107, 108, 109, 110, 111, 112, 114, 115, 116, 119, 120,
121, 122, 123, 124, 125, 126, 127, 129, 130, 131, 134, 135, 136, 137, 138, 139, 140, 141, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 159, 160, 162, 163, 164, 165, 166, 167, 168, 169,
170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 183, 184, 185, 186, 187, 188, 189, 190,
191, 194, 196, 197, 198, 199, 201, 202, 205, 207, 208, 209, 210, 212, 213, 216, 217, 218, 219, 220,
223, 224, 230, 233, 235, 236, 237, 238, 239, 240, 241, 242, 243, 246, 247, 248, 249, 250, 251, 252,
253, 254, 256, 259, 261, 262, 263, 264, 265, 268, 269, 270, 272, 273, 274, 275, 277, 278] | 70.967742 | false | 14.825334 | 279 | 73 | 29.032258 | 11 |
def decode_number(self, state):
buf = state.buf
self.skipws(state)
start_position = buf.position
# Use external number parser hook if available
if self.has_hook('decode_number') or self.has_hook('decode_float'):
c = buf.peek()
if c and c in '-+0123456789.': # First chars for a number-like value
buf.save_position()
nbr = buf.pop_while_in('-+0123456789abcdefABCDEF' 'NaN' 'Infinity.')
if '.' in nbr and self.has_hook('decode_float'):
hook_name = 'decode_float'
elif self.has_hook('decode_number'):
hook_name = 'decode_number'
else:
hook_name = None
if hook_name:
try:
val = self.call_hook(hook_name, nbr, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
val = undefined
else:
buf.clear_saved_position()
return val
# Hook didn't handle it, restore old position
buf.restore_position()
# Detect initial sign character(s)
sign = +1
sign_count = 0
sign_saw_plus = False
sign_saw_ws = False
c = buf.peek()
while c and c in '+-':
if c == '-':
sign = sign * -1
elif c == '+':
sign_saw_plus = True
sign_count += 1
buf.skip()
if self.skipws_nocomments(state) > 0:
sign_saw_ws = True
c = buf.peek()
if sign_count > 1 or sign_saw_plus:
state.push_cond(self.options.all_numeric_signs,
'Numbers may only have a single "-" as a sign prefix',
position=start_position)
if sign_saw_ws:
state.push_error('Spaces may not appear between a +/- number sign and the digits', position=start_position)
# Check for ECMAScript symbolic non-numbers
if not c:
state.push_error('Missing numeric value after sign', position=start_position)
self.recover_parser(state)
self.stats.num_undefineds += 1
return undefined
elif c.isalpha() or c in '_$':
kw = buf.popwhile(lambda c: c.isalnum() or c in '_$')
if kw == 'NaN':
state.push_cond(self.options.non_numbers,
'NaN literals are not allowed in strict JSON',
position=start_position)
state.stats.num_nans += 1
return self.options.nan
elif kw == 'Infinity':
state.push_cond(self.options.non_numbers,
'Infinity literals are not allowed in strict JSON',
position=start_position)
state.stats.num_infinities += 1
if sign < 0:
return self.options.neginf
else:
return self.options.inf
else:
state.push_error('Unknown numeric value keyword', kw, position=start_position)
return undefined
# Check for radix-prefixed numbers
elif c == '0' and (buf.peek(1) in ['x', 'X']):
# ----- HEX NUMBERS 0x123
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_hex_digit)
state.push_cond(self.options.hex_numbers,
'Hexadecimal literals are not allowed in strict JSON', prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error('Hexadecimal number is invalid', position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_hex(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_HEX)
return n
elif c == '0' and (buf.peek(1) in ['o', 'O']):
# ----- NEW-STYLE OCTAL NUMBERS 0o123
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_octal_digit)
state.push_cond(self.options.octal_numbers,
"Octal literals are not allowed in strict JSON", prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error("Octal number is invalid", position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_octal(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_OCTAL)
return n
elif c == '0' and (buf.peek(1) in ['b', 'B']):
# ----- NEW-STYLE BINARY NUMBERS 0b1101
prefix = buf.popstr(2)
digits = buf.popwhile(helpers.is_binary_digit)
state.push_cond(self.options.binary_numbers,
"Binary literals are not allowed in strict JSON", prefix + digits,
position=start_position)
if len(digits) == 0:
state.push_error("Binary number is invalid", position=start_position)
self.recover_parser(state)
return undefined
ival = helpers.decode_binary(digits)
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_BINARY)
return n
else:
# ----- DECIMAL OR LEGACY-OCTAL NUMBER. 123, 0123
# General syntax is: \d+[\.\d+][e[+-]?\d+]
number = buf.popwhile(lambda c: c in '0123456789.+-eE')
imax = len(number)
if imax == 0:
state.push_error('Missing numeric value', position=start_position)
has_leading_zero = False
units_digits = [] # digits making up whole number portion
fraction_digits = [] # digits making up fractional portion
exponent_digits = [] # digits making up exponent portion (excluding sign)
esign = '+' # sign of exponent
sigdigits = 0 # number of significant digits (approximate)
saw_decimal_point = False
saw_exponent = False
# Break number into parts in a first pass...use a mini state machine
in_part = 'units'
for i, c in enumerate(number):
if c == '.':
if in_part != 'units':
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
in_part = 'fraction'
saw_decimal_point = True
elif c in 'eE':
if in_part == 'exponent':
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
in_part = 'exponent'
saw_exponent = True
elif c in '+-':
if in_part != 'exponent' or exponent_digits:
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
esign = c
else: # digit
if in_part == 'units':
units_digits.append(c)
elif in_part == 'fraction':
fraction_digits.append(c)
elif in_part == 'exponent':
exponent_digits.append(c)
units_s = ''.join(units_digits)
fraction_s = ''.join(fraction_digits)
exponent_s = ''.join(exponent_digits)
# Basic syntax rules checking
is_integer = not (saw_decimal_point or saw_exponent)
if not units_s and not fraction_s:
state.push_error('Bad number', number, position=start_position)
self.recover_parser(state)
return undefined
if saw_decimal_point and not fraction_s:
state.push_cond(self.options.trailing_decimal_point,
'Bad number, decimal point must be followed by at least one digit',
number, position=start_position)
fraction_s = '0'
if saw_exponent and not exponent_s:
state.push_error('Bad number, exponent is missing', number, position=start_position)
self.recover_parser(state)
return undefined
if not units_s:
state.push_cond(self.options.initial_decimal_point,
'Bad number, decimal point must be preceded by at least one digit',
number, position=start_position)
            units_s = '0'  # was "units", a likely typo; units_s is the name read below
elif len(units_s) > 1 and units_s[0] == '0':
has_leading_zero = True
if self.options.is_forbid_leading_zeros:
state.push_cond(self.options.leading_zeros,
'Numbers may not have extra leading zeros',
number, position=start_position)
elif self.options.is_warn_leading_zeros:
state.push_cond(self.options.leading_zeros,
'Numbers may not have leading zeros; interpreting as %s' \
% self.options.leading_zero_radix_as_word,
number, position=start_position)
# Estimate number of significant digits
sigdigits = len((units_s + fraction_s).replace('0', ' ').strip())
# Handle legacy octal integers.
if has_leading_zero and is_integer and self.options.leading_zero_radix == 8:
# ----- LEGACY-OCTAL 0123
try:
ival = helpers.decode_octal(units_s)
except ValueError:
state.push_error('Bad number, not a valid octal value', number, position=start_position)
self.recover_parser(state)
return self.options.nan # undefined
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign, number_format=NUMBER_FORMAT_LEGACYOCTAL)
return n
# Determine the exponential part
if exponent_s:
try:
exponent = int(exponent_s)
except ValueError:
state.push_error('Bad number, bad exponent', number, position=start_position)
self.recover_parser(state)
return undefined
if esign == '-':
exponent = - exponent
else:
exponent = 0
# Try to make an int/long first.
if not saw_decimal_point and exponent >= 0:
# ----- A DECIMAL INTEGER
ival = int(units_s)
if exponent != 0:
ival *= 10 ** exponent
state.update_integer_stats(ival, sign=sign, position=start_position)
n = state.options.make_int(ival, sign)
else:
# ----- A FLOATING-POINT NUMBER
try:
if exponent < float_minexp or exponent > float_maxexp or sigdigits > float_sigdigits:
n = state.options.make_decimal(number, sign)
else:
n = state.options.make_float(number, sign)
except ValueError as err:
                state.push_error('Bad number, %s' % str(err), number, position=start_position)  # str(err): exceptions have no .message in Python 3
n = undefined
else:
state.update_float_stats(n, sign=sign, position=start_position)
return n
| 18,540 |
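An illustrative sketch of the branches in the row above, assuming upstream demjson's module-level `decode()` and its documented `strict` keyword (treat the exact option spelling as an assumption if you are on a different version):

```python
from akshare.utils import demjson

print(demjson.decode('123'))    # 123    (decimal-integer branch)
print(demjson.decode('1.5e2'))  # 150.0  (floating-point branch)

# Radix prefixes and symbolic non-numbers are only tolerated outside
# strict mode; in strict mode they are recorded as errors instead.
print(demjson.decode('0x1f', strict=False))      # 31   (hex branch)
print(demjson.decode('0o17', strict=False))      # 15   (new-style octal branch)
print(demjson.decode('Infinity', strict=False))  # inf  (symbolic non-number)
```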
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_number
|
(self, n, state)
|
Encodes a Python numeric type into a JSON numeric literal.
The special non-numeric values of float('nan'), float('inf')
and float('-inf') are translated into appropriate JSON
literals.
Note that Python complex types are not handled, as there is no
ECMAScript equivalent type.
|
Encodes a Python numeric type into a JSON numeric literal.
The special non-numeric values of float('nan'), float('inf')
and float('-inf') are translated into appropriate JSON
literals.
Note that Python complex types are not handled, as there is no
ECMAScript equivalent type.
| 4,027 | 4,087 |
def encode_number(self, n, state):
"""Encodes a Python numeric type into a JSON numeric literal.
The special non-numeric values of float('nan'), float('inf')
and float('-inf') are translated into appropriate JSON
literals.
Note that Python complex types are not handled, as there is no
ECMAScript equivalent type.
"""
if isinstance(n, complex):
if n.imag:
raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part', n)
n = n.real
if isinstance(n, json_int):
state.append(n.json_format())
return
if isinstance(n, int):
state.append(str(n))
return
if decimal and isinstance(n, decimal.Decimal):
if n.is_nan(): # Could be 'NaN' or 'sNaN'
state.append('NaN')
elif n.is_infinite():
if n.is_signed():
state.append('-Infinity')
else:
state.append('Infinity')
else:
s = str(n).lower()
if 'e' not in s and '.' not in s:
s = s + '.0'
state.append(s)
return
global nan, inf, neginf
if n is nan:
state.append('NaN')
elif n is inf:
state.append('Infinity')
elif n is neginf:
state.append('-Infinity')
elif isinstance(n, float):
# Check for non-numbers.
# In python nan == inf == -inf, so must use repr() to distinguish
reprn = repr(n).lower()
if ('inf' in reprn and '-' in reprn) or n == neginf:
state.append('-Infinity')
elif 'inf' in reprn or n is inf:
state.append('Infinity')
elif 'nan' in reprn or n is nan:
state.append('NaN')
else:
# A normal float.
state.append(repr(n))
else:
raise TypeError('encode_number expected an integral, float, or decimal number type', type(n))
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4027-L4087
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] | 18.032787 |
[11, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 27, 28, 29, 31, 33, 34, 35,
36, 37, 40, 41, 42, 43, 44, 45, 46, 49, 50, 51, 52, 53, 54, 55, 58, 60] | 62.295082 | false | 14.825334 | 61 | 23 | 37.704918 | 8 |
def encode_number(self, n, state):
if isinstance(n, complex):
if n.imag:
raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part', n)
n = n.real
if isinstance(n, json_int):
state.append(n.json_format())
return
if isinstance(n, int):
state.append(str(n))
return
if decimal and isinstance(n, decimal.Decimal):
if n.is_nan(): # Could be 'NaN' or 'sNaN'
state.append('NaN')
elif n.is_infinite():
if n.is_signed():
state.append('-Infinity')
else:
state.append('Infinity')
else:
s = str(n).lower()
if 'e' not in s and '.' not in s:
s = s + '.0'
state.append(s)
return
global nan, inf, neginf
if n is nan:
state.append('NaN')
elif n is inf:
state.append('Infinity')
elif n is neginf:
state.append('-Infinity')
elif isinstance(n, float):
# Check for non-numbers.
# In python nan == inf == -inf, so must use repr() to distinguish
reprn = repr(n).lower()
if ('inf' in reprn and '-' in reprn) or n == neginf:
state.append('-Infinity')
elif 'inf' in reprn or n is inf:
state.append('Infinity')
elif 'nan' in reprn or n is nan:
state.append('NaN')
else:
# A normal float.
state.append(repr(n))
else:
raise TypeError('encode_number expected an integral, float, or decimal number type', type(n))
| 18,541 |
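A sketch of the encoder's special cases from the row above, assuming upstream demjson behavior with non-strict defaults (strict mode would reject the non-numbers):

```python
from akshare.utils import demjson

print(demjson.encode(42))             # 42
print(demjson.encode(3.5))            # 3.5
print(demjson.encode(float('nan')))   # NaN
print(demjson.encode(float('-inf')))  # -Infinity
print(demjson.encode(5 + 0j))         # 5.0 -- zero-imaginary complex is demoted to its real part
try:
    demjson.encode(1 + 2j)            # non-zero imaginary part
except demjson.JSONEncodeError as exc:
    print(exc)                        # complex numbers cannot be represented in JSON
```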
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode_string
|
(self, state)
|
return s
|
Intermediate-level decoder for JSON string literals.
Takes a string and a starting index, and returns a Python
string (or unicode string) and the index of the next unparsed
character.
|
Intermediate-level decoder for JSON string literals.
| 4,089 | 4,376 |
def decode_string(self, state):
"""Intermediate-level decoder for JSON string literals.
Takes a string and a starting index, and returns a Python
string (or unicode string) and the index of the next unparsed
character.
"""
buf = state.buf
self.skipws(state)
quote = buf.peek()
if quote == '"':
pass
elif quote == "'":
state.push_cond(self.options.single_quoted_strings,
'String literals must use double quotation marks in strict JSON')
else:
state.push_error('String literal must be properly quoted')
return undefined
string_position = buf.position
buf.skip()
if self.options.is_forbid_js_string_escapes:
escapes = self._escapes_json
else:
escapes = self._escapes_js
ccallowed = not self.options.is_forbid_control_char_in_string
chunks = []
_append = chunks.append
# Used to track the last seen high-surrogate character
high_surrogate = None
highsur_position = None
    # Used to track if errors occurred so we don't keep reporting multiples
had_lineterm_error = False
# Start looping character by character until the final quotation mark
saw_final_quote = False
should_stop = False
while not saw_final_quote and not should_stop:
if buf.at_end:
state.push_error("String literal is not terminated",
outer_position=string_position, context='String')
break
c = buf.peek()
# Make sure a high surrogate is immediately followed by a low surrogate
if high_surrogate:
if 0xdc00 <= ord(c) <= 0xdfff:
low_surrogate = buf.pop()
try:
uc = helpers.surrogate_pair_as_unicode(high_surrogate, low_surrogate)
except ValueError as err:
state.push_error('Illegal Unicode surrogate pair', (high_surrogate, low_surrogate),
position=highsur_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
uc = '\ufffd' # replacement char
_append(uc)
high_surrogate = None
highsur_position = None
continue # ==== NEXT CHAR
elif buf.peekstr(2) != '\\u':
state.push_error('High unicode surrogate must be followed by a low surrogate',
position=highsur_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append('\ufffd') # replacement char
high_surrogate = None
highsur_position = None
if c == quote:
buf.skip() # skip over closing quote
saw_final_quote = True
break
elif c == '\\':
# Escaped character
escape_position = buf.position
buf.skip() # skip over backslash
c = buf.peek()
if not c:
state.push_error('Escape in string literal is incomplete', position=escape_position,
outer_position=string_position, context='String')
should_stop = state.should_stop
break
elif helpers.is_octal_digit(c):
# Handle octal escape codes first so special \0 doesn't kick in yet.
# Follow Annex B.1.2 of ECMAScript standard.
if '0' <= c <= '3':
maxdigits = 3
else:
maxdigits = 2
digits = buf.popwhile(helpers.is_octal_digit, maxchars=maxdigits)
n = helpers.decode_octal(digits)
if n == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
"\\" + digits, position=escape_position, outer_position=string_position,
context='String')
else: # n != 0
state.push_cond(self.options.octal_numbers,
"JSON does not allow octal character escapes other than \"\\0\"",
"\\" + digits, position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
if n < 128:
_append(chr(n))
else:
_append(helpers.safe_unichr(n))
elif c in escapes:
buf.skip()
_append(escapes[c])
elif c == 'u' or c == 'x':
buf.skip()
esc_opener = '\\' + c
esc_closer = ''
if c == 'u':
if buf.peek() == '{':
buf.skip()
esc_opener += '{'
esc_closer = '}'
maxdigits = None
state.push_cond(self.options.extended_unicode_escapes,
"JSON strings do not allow \\u{...} escapes",
position=escape_position, outer_position=string_position,
context='String')
else:
maxdigits = 4
else: # c== 'x'
state.push_cond(self.options.js_string_escapes,
"JSON strings may not use the \\x hex-escape",
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
maxdigits = 2
digits = buf.popwhile(helpers.is_hex_digit, maxchars=maxdigits)
if esc_closer:
if buf.peek() != esc_closer:
state.push_error("Unicode escape sequence is missing closing \'%s\'" % esc_closer,
esc_opener + digits,
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
else:
buf.skip()
esc_sequence = esc_opener + digits + esc_closer
if not digits:
state.push_error('numeric character escape sequence is truncated', esc_sequence,
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
codepoint = 0xfffd # replacement char
else:
if maxdigits and len(digits) != maxdigits:
state.push_error('escape sequence has too few hexadecimal digits', esc_sequence,
position=escape_position, outer_position=string_position,
context='String')
codepoint = helpers.decode_hex(digits)
if codepoint > 0x10FFFF:
state.push_error('Unicode codepoint is beyond U+10FFFF', esc_opener + digits + esc_closer,
position=escape_position, outer_position=string_position,
context='String')
codepoint = 0xfffd # replacement char
if high_surrogate:
# Decode surrogate pair and clear high surrogate
low_surrogate = chr(codepoint)
try:
uc = helpers.surrogate_pair_as_unicode(high_surrogate, low_surrogate)
except ValueError as err:
state.push_error('Illegal Unicode surrogate pair', (high_surrogate, low_surrogate),
position=highsur_position,
outer_position=string_position,
context='String')
should_stop = state.should_stop
uc = '\ufffd' # replacement char
_append(uc)
high_surrogate = None
highsur_position = None
elif codepoint < 128:
# ASCII chars always go in as a str
if codepoint == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append(chr(codepoint))
elif 0xd800 <= codepoint <= 0xdbff: # high surrogate
high_surrogate = chr(codepoint) # remember until we get to the low surrogate
highsur_position = escape_position.copy()
elif 0xdc00 <= codepoint <= 0xdfff: # low surrogate
                    state.push_error('Low unicode surrogate must be preceded by a high surrogate',
                                     position=escape_position,
                                     outer_position=string_position,
                                     context='String')
should_stop = state.should_stop
_append('\ufffd') # replacement char
else:
# Other chars go in as a unicode char
_append(helpers.safe_unichr(codepoint))
else:
# Unknown escape sequence
state.push_cond(self.options.nonescape_characters,
'String escape code is not allowed in strict JSON',
'\\' + c, position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append(c)
buf.skip()
elif ord(c) <= 0x1f: # A control character
if ord(c) == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
if self.islineterm(c):
if not had_lineterm_error:
state.push_error('Line terminator characters must be escaped inside string literals',
'U+%04X' % ord(c),
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
had_lineterm_error = True
_append(c)
buf.skip()
elif ccallowed:
_append(c)
buf.skip()
else:
state.push_error('Control characters must be escaped inside JSON string literals',
'U+%04X' % ord(c),
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
buf.skip()
elif 0xd800 <= ord(c) <= 0xdbff: # a raw high surrogate
high_surrogate = buf.pop() # remember until we get to the low surrogate
highsur_position = buf.position.copy()
else: # A normal character; not an escape sequence or end-quote.
            # Find a whole sequence of "safe" characters so we can append them
            # all at once rather than one at a time, for speed.
chunk = buf.popwhile(lambda c: c not in helpers.unsafe_string_chars and c != quote)
if not chunk:
_append(c)
buf.skip()
else:
_append(chunk)
# Check proper string termination
if high_surrogate:
state.push_error('High unicode surrogate must be followed by a low surrogate',
position=highsur_position, outer_position=string_position,
context='String')
_append('\ufffd') # replacement char
high_surrogate = None
highsur_position = None
if not saw_final_quote:
state.push_error('String literal is not terminated with a quotation mark', position=buf.position,
outer_position=string_position,
context='String')
if state.should_stop:
return undefined
# Compose the python string and update stats
s = ''.join(chunks)
state.update_string_stats(s, position=string_position)
# Call string hook
if self.has_hook('decode_string'):
try:
s = self.call_hook('decode_string', s, position=string_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
s = undefined
return s
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4089-L4376
| 25 |
[0, 1, 2, 3, 4, 5, 6, 7] | 2.777778 |
[8, 9, 10, 11, 12, 13, 14, 17, 18, 20, 21, 23, 24, 26, 27, 28, 29, 32, 33, 36,
39, 40, 41, 42, 43, 45, 46, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63,
64, 65, 68, 69, 70, 71, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 85, 86, 87, 90,
91, 93, 94, 95, 96, 97, 102, 106, 107, 108, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
120, 121, 122, 123, 124, 129, 131, 135, 136, 138, 140, 141, 142, 146, 148, 150, 152, 153, 156, 157,
159, 160, 163, 165, 166, 169, 171, 173, 174, 175, 176, 177, 181, 182, 183, 184, 185, 186, 188, 189,
193, 194, 195, 196, 197, 198, 199, 203, 204, 207, 210, 214, 215, 216, 217, 218, 219, 223, 224, 225,
226, 230, 231, 232, 233, 234, 235, 236, 238, 242, 243, 244, 245, 246, 250, 251, 252, 253, 255, 258,
259, 262, 263, 264, 266, 267, 271, 272, 275, 276, 279, 280, 281, 282, 283, 284, 285, 286, 287] | 62.152778 | false | 14.825334 | 288 | 49 | 37.847222 | 5 |
def decode_string(self, state):
buf = state.buf
self.skipws(state)
quote = buf.peek()
if quote == '"':
pass
elif quote == "'":
state.push_cond(self.options.single_quoted_strings,
'String literals must use double quotation marks in strict JSON')
else:
state.push_error('String literal must be properly quoted')
return undefined
string_position = buf.position
buf.skip()
if self.options.is_forbid_js_string_escapes:
escapes = self._escapes_json
else:
escapes = self._escapes_js
ccallowed = not self.options.is_forbid_control_char_in_string
chunks = []
_append = chunks.append
# Used to track the last seen high-surrogate character
high_surrogate = None
highsur_position = None
    # Used to track if errors occurred so we don't keep reporting multiples
had_lineterm_error = False
# Start looping character by character until the final quotation mark
saw_final_quote = False
should_stop = False
while not saw_final_quote and not should_stop:
if buf.at_end:
state.push_error("String literal is not terminated",
outer_position=string_position, context='String')
break
c = buf.peek()
# Make sure a high surrogate is immediately followed by a low surrogate
if high_surrogate:
if 0xdc00 <= ord(c) <= 0xdfff:
low_surrogate = buf.pop()
try:
uc = helpers.surrogate_pair_as_unicode(high_surrogate, low_surrogate)
except ValueError as err:
state.push_error('Illegal Unicode surrogate pair', (high_surrogate, low_surrogate),
position=highsur_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
uc = '\ufffd' # replacement char
_append(uc)
high_surrogate = None
highsur_position = None
continue # ==== NEXT CHAR
elif buf.peekstr(2) != '\\u':
state.push_error('High unicode surrogate must be followed by a low surrogate',
position=highsur_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append('\ufffd') # replacement char
high_surrogate = None
highsur_position = None
if c == quote:
buf.skip() # skip over closing quote
saw_final_quote = True
break
elif c == '\\':
# Escaped character
escape_position = buf.position
buf.skip() # skip over backslash
c = buf.peek()
if not c:
state.push_error('Escape in string literal is incomplete', position=escape_position,
outer_position=string_position, context='String')
should_stop = state.should_stop
break
elif helpers.is_octal_digit(c):
# Handle octal escape codes first so special \0 doesn't kick in yet.
# Follow Annex B.1.2 of ECMAScript standard.
if '0' <= c <= '3':
maxdigits = 3
else:
maxdigits = 2
digits = buf.popwhile(helpers.is_octal_digit, maxchars=maxdigits)
n = helpers.decode_octal(digits)
if n == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
"\\" + digits, position=escape_position, outer_position=string_position,
context='String')
else: # n != 0
state.push_cond(self.options.octal_numbers,
"JSON does not allow octal character escapes other than \"\\0\"",
"\\" + digits, position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
if n < 128:
_append(chr(n))
else:
_append(helpers.safe_unichr(n))
elif c in escapes:
buf.skip()
_append(escapes[c])
elif c == 'u' or c == 'x':
buf.skip()
esc_opener = '\\' + c
esc_closer = ''
if c == 'u':
if buf.peek() == '{':
buf.skip()
esc_opener += '{'
esc_closer = '}'
maxdigits = None
state.push_cond(self.options.extended_unicode_escapes,
"JSON strings do not allow \\u{...} escapes",
position=escape_position, outer_position=string_position,
context='String')
else:
maxdigits = 4
else: # c== 'x'
state.push_cond(self.options.js_string_escapes,
"JSON strings may not use the \\x hex-escape",
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
maxdigits = 2
digits = buf.popwhile(helpers.is_hex_digit, maxchars=maxdigits)
if esc_closer:
if buf.peek() != esc_closer:
state.push_error("Unicode escape sequence is missing closing \'%s\'" % esc_closer,
esc_opener + digits,
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
else:
buf.skip()
esc_sequence = esc_opener + digits + esc_closer
if not digits:
state.push_error('numeric character escape sequence is truncated', esc_sequence,
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
codepoint = 0xfffd # replacement char
else:
if maxdigits and len(digits) != maxdigits:
state.push_error('escape sequence has too few hexadecimal digits', esc_sequence,
position=escape_position, outer_position=string_position,
context='String')
codepoint = helpers.decode_hex(digits)
if codepoint > 0x10FFFF:
state.push_error('Unicode codepoint is beyond U+10FFFF', esc_opener + digits + esc_closer,
position=escape_position, outer_position=string_position,
context='String')
codepoint = 0xfffd # replacement char
if high_surrogate:
# Decode surrogate pair and clear high surrogate
low_surrogate = chr(codepoint)
try:
uc = helpers.surrogate_pair_as_unicode(high_surrogate, low_surrogate)
except ValueError as err:
state.push_error('Illegal Unicode surrogate pair', (high_surrogate, low_surrogate),
position=highsur_position,
outer_position=string_position,
context='String')
should_stop = state.should_stop
uc = '\ufffd' # replacement char
_append(uc)
high_surrogate = None
highsur_position = None
elif codepoint < 128:
# ASCII chars always go in as a str
if codepoint == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append(chr(codepoint))
elif 0xd800 <= codepoint <= 0xdbff: # high surrogate
high_surrogate = chr(codepoint) # remember until we get to the low surrogate
highsur_position = escape_position.copy()
elif 0xdc00 <= codepoint <= 0xdfff: # low surrogate
                    state.push_error('Low unicode surrogate must be preceded by a high surrogate',
                                     position=escape_position,
                                     outer_position=string_position,
                                     context='String')
should_stop = state.should_stop
_append('\ufffd') # replacement char
else:
# Other chars go in as a unicode char
_append(helpers.safe_unichr(codepoint))
else:
# Unknown escape sequence
state.push_cond(self.options.nonescape_characters,
'String escape code is not allowed in strict JSON',
'\\' + c, position=escape_position, outer_position=string_position,
context='String')
should_stop = state.should_stop
_append(c)
buf.skip()
elif ord(c) <= 0x1f: # A control character
if ord(c) == 0:
state.push_cond(self.options.zero_byte,
'Zero-byte character (U+0000) in string may not be universally safe',
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
if self.islineterm(c):
if not had_lineterm_error:
state.push_error('Line terminator characters must be escaped inside string literals',
'U+%04X' % ord(c),
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
had_lineterm_error = True
_append(c)
buf.skip()
elif ccallowed:
_append(c)
buf.skip()
else:
state.push_error('Control characters must be escaped inside JSON string literals',
'U+%04X' % ord(c),
position=buf.position, outer_position=string_position,
context='String')
should_stop = state.should_stop
buf.skip()
elif 0xd800 <= ord(c) <= 0xdbff: # a raw high surrogate
high_surrogate = buf.pop() # remember until we get to the low surrogate
highsur_position = buf.position.copy()
else: # A normal character; not an escape sequence or end-quote.
            # Find a whole sequence of "safe" characters so we can append them
            # all at once rather than one at a time, for speed.
chunk = buf.popwhile(lambda c: c not in helpers.unsafe_string_chars and c != quote)
if not chunk:
_append(c)
buf.skip()
else:
_append(chunk)
# Check proper string termination
if high_surrogate:
state.push_error('High unicode surrogate must be followed by a low surrogate',
position=highsur_position, outer_position=string_position,
context='String')
_append('\ufffd') # replacement char
high_surrogate = None
highsur_position = None
if not saw_final_quote:
state.push_error('String literal is not terminated with a quotation mark', position=buf.position,
outer_position=string_position,
context='String')
if state.should_stop:
return undefined
# Compose the python string and update stats
s = ''.join(chunks)
state.update_string_stats(s, position=string_position)
# Call string hook
if self.has_hook('decode_string'):
try:
s = self.call_hook('decode_string', s, position=string_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
s = undefined
return s
| 18,542 |
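A sketch of the string decoder's escape handling from the row above, assuming upstream demjson's `decode()` signature:

```python
from akshare.utils import demjson

print(demjson.decode(r'"caf\u00e9"'))     # café  (4-digit \u escape)
print(demjson.decode(r'"\ud83d\ude00"'))  # 😀    (surrogate pair recombined)
print(demjson.decode(r'"a\tb"'))          # a	b  (shortcut escape table)

# Single-quoted literals trip the single_quoted_strings condition and are
# only accepted outside strict mode.
print(demjson.decode("'hi'", strict=False))  # hi
```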
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_string
|
(self, s, state)
|
Encodes a Python string into a JSON string literal.
|
Encodes a Python string into a JSON string literal.
| 4,378 | 4,507 |
def encode_string(self, s, state):
"""Encodes a Python string into a JSON string literal.
"""
# Must handle instances of UserString specially in order to be
    # able to use ord() on its simulated "characters". Also
# convert Python2 'str' types to unicode strings first.
import unicodedata, sys
import collections
py2strenc = self.options.py2str_encoding
if isinstance(s, collections.UserString):
def tochar(c):
c2 = c.data
if py2strenc and not isinstance(c2, str):
return c2.decode(py2strenc)
else:
return c2
elif py2strenc and not isinstance(s, str):
s = s.decode(py2strenc)
tochar = None
else:
# Could use "lambda c:c", but that is too slow. So we set to None
# and use an explicit if test inside the loop.
tochar = None
chunks = []
chunks.append('"')
revesc = self._rev_escapes
optrevesc = self._optional_rev_escapes
asciiencodable = self._asciiencodable
always_escape = state.options.always_escape_chars
encunicode = state.escape_unicode_test
i = 0
imax = len(s)
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool) \
and not (always_escape and c in always_escape):
# Contiguous runs of plain old printable ASCII can be copied
# directly to the JSON output without worry (unless the user
# has supplied a custom is-encodable function).
j = i
i += 1
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] \
and not (always_escape and c in always_escape):
i += 1
else:
break
chunks.append(str(s[j:i]))
elif c in revesc:
# Has a shortcut escape sequence, like "\n"
chunks.append(revesc[c])
i += 1
elif cord <= 0x1F:
# Always unicode escape ASCII-control characters
chunks.append(r'\u%04x' % cord)
i += 1
elif 0xD800 <= cord <= 0xDFFF:
# A raw surrogate character!
# This should ONLY happen in "narrow" Python builds
# where (sys.maxunicode == 65535) as Python itself
# uses UTF-16. But for "wide" Python builds, a raw
# surrogate should never happen.
handled_raw_surrogates = False
if sys.maxunicode == 0xFFFF and 0xD800 <= cord <= 0xDBFF and (i + 1) < imax:
# In a NARROW Python, output surrogate pair as-is
hsurrogate = cord
i += 1
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
i += 1
if 0xDC00 <= cord <= 0xDFFF:
lsurrogate = cord
chunks.append(r'\u%04x\u%04x' % (hsurrogate, lsurrogate))
handled_raw_surrogates = True
if not handled_raw_surrogates:
cname = 'U+%04X' % cord
raise JSONEncodeError('can not include or escape a Unicode surrogate character', cname)
elif cord <= 0xFFFF:
# Other BMP Unicode character
if always_escape and c in always_escape:
doesc = True
elif unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
doesc = True
elif callable(encunicode):
doesc = encunicode(c)
else:
doesc = encunicode
if doesc:
if c in optrevesc:
chunks.append(optrevesc[c])
else:
chunks.append(r'\u%04x' % cord)
else:
chunks.append(c)
i += 1
else: # ord(c) >= 0x10000
# Non-BMP Unicode
if always_escape and c in always_escape:
doesc = True
elif unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
doesc = True
elif callable(encunicode):
doesc = encunicode(c)
else:
doesc = encunicode
if doesc:
for surrogate in helpers.unicode_as_surrogate_pair(c):
chunks.append(r'\u%04x' % ord(surrogate))
else:
chunks.append(c)
i += 1
chunks.append('"')
state.append(''.join(chunks))
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4378-L4507
| 25 |
[0, 1, 2, 3, 4, 5, 6] | 5.384615 |
[7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 23, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 38, 39, 40, 45, 46, 47, 48, 49, 51, 52, 53, 55, 57, 58, 59,
61, 62, 63, 65, 66, 67, 73, 74, 76, 77, 78, 79, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 93, 94, 95, 96, 97, 98, 100, 102, 103, 104, 106, 108, 109, 112, 113, 114, 115,
116, 117, 119, 121, 122, 123, 125, 126, 128, 129] | 69.230769 | false | 14.825334 | 130 | 42 | 30.769231 | 1 |
def encode_string(self, s, state):
# Must handle instances of UserString specially in order to be
    # able to use ord() on its simulated "characters". Also
# convert Python2 'str' types to unicode strings first.
import unicodedata, sys
import collections
py2strenc = self.options.py2str_encoding
if isinstance(s, collections.UserString):
def tochar(c):
c2 = c.data
if py2strenc and not isinstance(c2, str):
return c2.decode(py2strenc)
else:
return c2
elif py2strenc and not isinstance(s, str):
s = s.decode(py2strenc)
tochar = None
else:
# Could use "lambda c:c", but that is too slow. So we set to None
# and use an explicit if test inside the loop.
tochar = None
chunks = []
chunks.append('"')
revesc = self._rev_escapes
optrevesc = self._optional_rev_escapes
asciiencodable = self._asciiencodable
always_escape = state.options.always_escape_chars
encunicode = state.escape_unicode_test
i = 0
imax = len(s)
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool) \
and not (always_escape and c in always_escape):
# Contiguous runs of plain old printable ASCII can be copied
# directly to the JSON output without worry (unless the user
# has supplied a custom is-encodable function).
j = i
i += 1
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] \
and not (always_escape and c in always_escape):
i += 1
else:
break
chunks.append(str(s[j:i]))
elif c in revesc:
# Has a shortcut escape sequence, like "\n"
chunks.append(revesc[c])
i += 1
elif cord <= 0x1F:
# Always unicode escape ASCII-control characters
chunks.append(r'\u%04x' % cord)
i += 1
elif 0xD800 <= cord <= 0xDFFF:
# A raw surrogate character!
# This should ONLY happen in "narrow" Python builds
# where (sys.maxunicode == 65535) as Python itself
# uses UTF-16. But for "wide" Python builds, a raw
# surrogate should never happen.
handled_raw_surrogates = False
if sys.maxunicode == 0xFFFF and 0xD800 <= cord <= 0xDBFF and (i + 1) < imax:
# In a NARROW Python, output surrogate pair as-is
hsurrogate = cord
i += 1
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
i += 1
if 0xDC00 <= cord <= 0xDFFF:
lsurrogate = cord
chunks.append(r'\u%04x\u%04x' % (hsurrogate, lsurrogate))
handled_raw_surrogates = True
if not handled_raw_surrogates:
cname = 'U+%04X' % cord
raise JSONEncodeError('can not include or escape a Unicode surrogate character', cname)
elif cord <= 0xFFFF:
# Other BMP Unicode character
if always_escape and c in always_escape:
doesc = True
elif unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
doesc = True
elif callable(encunicode):
doesc = encunicode(c)
else:
doesc = encunicode
if doesc:
if c in optrevesc:
chunks.append(optrevesc[c])
else:
chunks.append(r'\u%04x' % cord)
else:
chunks.append(c)
i += 1
else: # ord(c) >= 0x10000
# Non-BMP Unicode
if always_escape and c in always_escape:
doesc = True
elif unicodedata.category(c) in ['Cc', 'Cf', 'Zl', 'Zp']:
doesc = True
elif callable(encunicode):
doesc = encunicode(c)
else:
doesc = encunicode
if doesc:
for surrogate in helpers.unicode_as_surrogate_pair(c):
chunks.append(r'\u%04x' % ord(surrogate))
else:
chunks.append(c)
i += 1
chunks.append('"')
state.append(''.join(chunks))
| 18,543 |
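A sketch of the escaping rules from the row above, assuming upstream demjson behavior; the `escape_unicode` keyword mirrors demjson's documented option and should be treated as an assumption on other versions:

```python
from akshare.utils import demjson

print(demjson.encode('plain ASCII'))  # "plain ASCII"  (copied through in one chunk)
print(demjson.encode('a\tb'))         # "a\tb"         (shortcut escape table)
print(demjson.encode('\x01'))         # "\u0001"       (control chars are always \u-escaped)

# Whether non-ASCII is escaped depends on escape_unicode; both forms decode
# back to the same Python string.
print(demjson.encode('café', escape_unicode=True))  # "caf\u00e9"
```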
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode_identifier
|
(self, state, identifier_as_string=False)
|
return obj
|
Decodes an identifier/keyword.
|
Decodes an identifier/keyword.
| 4,509 | 4,582 |
def decode_identifier(self, state, identifier_as_string=False):
"""Decodes an identifier/keyword.
"""
buf = state.buf
self.skipws(state)
start_position = buf.position
obj = None
kw = buf.pop_identifier()
if not kw:
state.push_error("Expected an identifier", position=start_position)
elif kw == 'null':
obj = None
state.stats.num_nulls += 1
elif kw == 'true':
obj = True
state.stats.num_bools += 1
elif kw == 'false':
obj = False
state.stats.num_bools += 1
elif kw == 'undefined':
state.push_cond(self.options.undefined_values,
"Strict JSON does not allow the 'undefined' keyword",
kw, position=start_position)
obj = undefined
state.stats.num_undefineds += 1
elif kw == 'NaN' or kw == 'Infinity':
state.push_cond(self.options.non_numbers,
"%s literals are not allowed in strict JSON" % kw,
kw, position=start_position)
if self.has_hook('decode_float'):
try:
val = self.call_hook('decode_float', kw, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
return undefined
else:
return val
elif self.has_hook('decode_number'):
try:
val = self.call_hook('decode_number', kw, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
return undefined
else:
return val
if kw == 'NaN':
state.stats.num_nans += 1
obj = state.options.nan
else:
state.stats.num_infinities += 1
obj = state.options.inf
else:
# Convert unknown identifiers into strings
if identifier_as_string:
if kw in helpers.javascript_reserved_words:
state.push_warning("Identifier is a JavaScript reserved word",
kw, position=start_position)
state.push_cond(self.options.identifier_keys,
"JSON does not allow identifiers to be used as strings",
kw, position=start_position)
state.stats.num_identifiers += 1
obj = self.decode_javascript_identifier(kw)
else:
state.push_error("Unknown identifier", kw, position=start_position)
obj = undefined
state.stats.num_identifiers += 1
return obj
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4509-L4582
| 25 |
[0, 1, 2, 3] | 5.405405 |
[4, 5, 6, 7, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 26, 27,
28, 29, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 51,
52, 53, 54, 56, 57, 60, 61, 62, 64, 67, 68, 70, 71, 72, 73] | 74.324324 | false | 14.825334 | 74 | 17 | 25.675676 | 1 |
def decode_identifier(self, state, identifier_as_string=False):
buf = state.buf
self.skipws(state)
start_position = buf.position
obj = None
kw = buf.pop_identifier()
if not kw:
state.push_error("Expected an identifier", position=start_position)
elif kw == 'null':
obj = None
state.stats.num_nulls += 1
elif kw == 'true':
obj = True
state.stats.num_bools += 1
elif kw == 'false':
obj = False
state.stats.num_bools += 1
elif kw == 'undefined':
state.push_cond(self.options.undefined_values,
"Strict JSON does not allow the 'undefined' keyword",
kw, position=start_position)
obj = undefined
state.stats.num_undefineds += 1
elif kw == 'NaN' or kw == 'Infinity':
state.push_cond(self.options.non_numbers,
"%s literals are not allowed in strict JSON" % kw,
kw, position=start_position)
if self.has_hook('decode_float'):
try:
val = self.call_hook('decode_float', kw, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
return undefined
else:
return val
elif self.has_hook('decode_number'):
try:
val = self.call_hook('decode_number', kw, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
return undefined
else:
return val
if kw == 'NaN':
state.stats.num_nans += 1
obj = state.options.nan
else:
state.stats.num_infinities += 1
obj = state.options.inf
else:
# Convert unknown identifiers into strings
if identifier_as_string:
if kw in helpers.javascript_reserved_words:
state.push_warning("Identifier is a JavaScript reserved word",
kw, position=start_position)
state.push_cond(self.options.identifier_keys,
"JSON does not allow identifiers to be used as strings",
kw, position=start_position)
state.stats.num_identifiers += 1
obj = self.decode_javascript_identifier(kw)
else:
state.push_error("Unknown identifier", kw, position=start_position)
obj = undefined
state.stats.num_identifiers += 1
return obj
| 18,544 |
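A sketch of the keyword handling from the row above, assuming upstream demjson's `decode()`:

```python
from akshare.utils import demjson

print(demjson.decode('null'))   # None
print(demjson.decode('true'))   # True
print(demjson.decode('false'))  # False

# 'undefined' and the NaN/Infinity identifiers only pass outside strict mode.
print(demjson.decode('NaN', strict=False))        # nan
print(demjson.decode('undefined', strict=False))  # demjson's 'undefined' singleton
```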
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.skip_comment
|
(self, state)
|
Skips an ECMAScript comment, either // or /* style.
The contents of the comment are returned as a string, as well
as the index of the character immediately after the comment.
|
Skips an ECMAScript comment, either // or /* style.
| 4,584 | 4,621 |
def skip_comment(self, state):
"""Skips an ECMAScript comment, either // or /* style.
The contents of the comment are returned as a string, as well
as the index of the character immediately after the comment.
"""
buf = state.buf
uniws = self.options.unicode_whitespace
s = buf.peekstr(2)
if s != '//' and s != '/*':
return None
state.push_cond(self.options.comments, 'Comments are not allowed in strict JSON')
start_position = buf.position
buf.skip(2)
multiline = (s == '/*')
saw_close = False
while not buf.at_end:
if multiline:
if buf.peekstr(2) == '*/':
buf.skip(2)
saw_close = True
break
elif buf.peekstr(2) == '/*':
state.push_error('Multiline /* */ comments may not nest',
outer_position=start_position,
context='Comment')
else:
if buf.at_eol(uniws):
buf.skip_to_next_line(uniws)
saw_close = True
break
buf.pop()
if not saw_close and multiline:
state.push_error('Comment was never terminated', outer_position=start_position,
context='Comment')
state.stats.num_comments += 1
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4584-L4621
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 18.421053 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
28,
29,
30,
31,
32,
34,
35,
37
] | 68.421053 | false | 14.825334 | 38 | 10 | 31.578947 | 4 |
def skip_comment(self, state):
buf = state.buf
uniws = self.options.unicode_whitespace
s = buf.peekstr(2)
if s != '//' and s != '/*':
return None
state.push_cond(self.options.comments, 'Comments are not allowed in strict JSON')
start_position = buf.position
buf.skip(2)
multiline = (s == '/*')
saw_close = False
while not buf.at_end:
if multiline:
if buf.peekstr(2) == '*/':
buf.skip(2)
saw_close = True
break
elif buf.peekstr(2) == '/*':
state.push_error('Multiline /* */ comments may not nest',
outer_position=start_position,
context='Comment')
else:
if buf.at_eol(uniws):
buf.skip_to_next_line(uniws)
saw_close = True
break
buf.pop()
if not saw_close and multiline:
state.push_error('Comment was never terminated', outer_position=start_position,
context='Comment')
state.stats.num_comments += 1
| 18,545 |
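A minimal sketch of comment handling through the public decode() method shown later in this file, assuming a default (non-strict-leaning) JSON() configuration in which the push_cond() above records a warning rather than a fatal error:

from akshare.utils.demjson import JSON

j = JSON()
res = j.decode('{"a": 1 /* block comment */, "b": 2  // line comment\n}',
               return_errors=True)
# res.object is expected to be {'a': 1, 'b': 2}; res.errors collects any
# warnings pushed for the comments, instead of raising them.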
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.skipws_nocomments
|
(self, state)
|
return state.buf.skipws(not self.options.is_forbid_unicode_whitespace)
|
Skips whitespace (will not allow comments).
|
Skips whitespace (will not allow comments).
| 4,623 | 4,626 |
def skipws_nocomments(self, state):
"""Skips whitespace (will not allow comments).
"""
return state.buf.skipws(not self.options.is_forbid_unicode_whitespace)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4623-L4626
| 25 |
[
0,
1,
2
] | 75 |
[
3
] | 25 | false | 14.825334 | 4 | 1 | 75 | 1 |
def skipws_nocomments(self, state):
return state.buf.skipws(not self.options.is_forbid_unicode_whitespace)
| 18,546 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.skipws
|
(self, state)
|
Skips all whitespace, including comments and Unicode whitespace.
Operates on the parse state, advancing its buffer to the next
non-whitespace character.
If the 'comments' behavior is allowed and not running in
strict JSON mode, then comments will be skipped over just like
whitespace.
|
Skips all whitespace, including comments and Unicode whitespace.
| 4,628 | 4,648 |
def skipws(self, state):
"""Skips all whitespace, including comments and unicode whitespace
Takes a string and a starting index, and returns the index of the
next non-whitespace character.
If the 'skip_comments' behavior is True and not running in
strict JSON mode, then comments will be skipped over just like
whitespace.
"""
buf = state.buf
uniws = not self.options.unicode_whitespace
while not buf.at_end:
c = buf.peekstr(2)
if c == '/*' or c == '//':
cmt = self.skip_comment(state)
elif buf.at_ws(uniws):
buf.skipws(uniws)
else:
break
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4628-L4648
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 52.380952 |
[
11,
12,
13,
14,
15,
16,
17,
18,
20
] | 42.857143 | false | 14.825334 | 21 | 5 | 57.142857 | 8 |
def skipws(self, state):
buf = state.buf
uniws = not self.options.unicode_whitespace
while not buf.at_end:
c = buf.peekstr(2)
if c == '/*' or c == '//':
cmt = self.skip_comment(state)
elif buf.at_ws(uniws):
buf.skipws(uniws)
else:
break
| 18,547 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode_composite
|
(self, state)
|
return obj
|
Intermediate-level JSON decoder for composite literal types (array and object).
|
Intermediate-level JSON decoder for composite literal types (array and object).
| 4,650 | 4,852 |
def decode_composite(self, state):
"""Intermediate-level JSON decoder for composite literal types (array and object).
"""
if state.should_stop:
return None
buf = state.buf
self.skipws(state)
opener = buf.peek()
if opener not in '{[':
state.push_error('Composite data must start with "[" or "{"')
return None
start_position = buf.position
buf.skip()
if opener == '[':
isdict = False
closer = ']'
obj = []
else:
isdict = True
closer = '}'
if state.options.sort_keys == SORT_PRESERVE and _OrderedDict:
obj = _OrderedDict()
else:
obj = {}
num_items = 0
self.skipws(state)
c = buf.peek()
if c == closer:
# empty composite
buf.skip()
done = True
else:
saw_value = False # set to false at beginning and after commas
done = False
while not done and not buf.at_end and not state.should_stop:
self.skipws(state)
c = buf.peek()
if c == '':
                break  # will report error further down because done==False
elif c == ',':
if not saw_value:
# no preceding value, an elided (omitted) element
if isdict:
state.push_error('Can not omit elements of an object (dictionary)',
outer_position=start_position,
context='Object')
else:
state.push_cond(self.options.omitted_array_elements,
'Can not omit elements of an array (list)',
outer_position=start_position,
context='Array')
obj.append(undefined)
if state.stats:
state.stats.num_undefineds += 1
buf.skip() # skip over comma
saw_value = False
continue
elif c == closer:
if not saw_value:
if isdict:
state.push_cond(self.options.trailing_comma,
'Strict JSON does not allow a final comma in an object (dictionary) literal',
outer_position=start_position,
context='Object')
else:
state.push_cond(self.options.trailing_comma,
'Strict JSON does not allow a final comma in an array (list) literal',
outer_position=start_position,
context='Array')
buf.skip() # skip over closer
done = True
break
elif c in ']}':
if isdict:
cdesc = 'Object'
else:
cdesc = 'Array'
state.push_error("Expected a '%c' but saw '%c'" % (closer, c),
outer_position=start_position, context=cdesc)
done = True
break
if state.should_stop:
break
# Decode the item/value
value_position = buf.position
if isdict:
val = self.decodeobj(state, identifier_as_string=True)
else:
val = self.decodeobj(state, identifier_as_string=False)
if val is syntax_error:
recover_c = self.recover_parser(state)
if recover_c not in ':':
continue
if state.should_stop:
break
if saw_value:
# Two values without a separating comma
if isdict:
cdesc = 'Object'
else:
cdesc = 'Array'
state.push_error('Values must be separated by a comma',
position=value_position, outer_position=start_position,
context=cdesc)
saw_value = True
self.skipws(state)
if state.should_stop:
break
if isdict:
skip_item = False
key = val # Ref 11.1.5
key_position = value_position
if not helpers.isstringtype(key):
if helpers.isnumbertype(key):
state.push_cond(self.options.nonstring_keys,
'JSON only permits string literals as object properties (keys)',
position=key_position, outer_position=start_position,
context='Object')
else:
state.push_error(
'Object properties (keys) must be string literals, numbers, or identifiers',
position=key_position, outer_position=start_position,
context='Object')
skip_item = True
c = buf.peek()
if c != ':':
state.push_error('Missing value for object property, expected ":"',
position=value_position, outer_position=start_position,
context='Object')
buf.skip() # skip over colon
self.skipws(state)
rval = self.decodeobj(state)
self.skipws(state)
if not skip_item:
if key in obj:
state.push_cond(self.options.duplicate_keys,
'Object contains duplicate key',
key, position=key_position, outer_position=start_position,
context='Object')
if key == '':
state.push_cond(self.options.non_portable,
'Using an empty string "" as an object key may not be portable',
position=key_position, outer_position=start_position,
context='Object')
obj[key] = rval
num_items += 1
else: # islist
obj.append(val)
num_items += 1
# end while
if state.stats:
if isdict:
state.stats.max_items_in_object = max(state.stats.max_items_in_object, num_items)
else:
state.stats.max_items_in_array = max(state.stats.max_items_in_array, num_items)
if state.should_stop:
return obj
# Make sure composite value is properly terminated
if not done:
if isdict:
state.push_error('Object literal (dictionary) is not terminated',
outer_position=start_position, context='Object')
else:
state.push_error('Array literal (list) is not terminated',
outer_position=start_position, context='Array')
# Update stats and run hooks
if isdict:
state.stats.num_objects += 1
if self.has_hook('decode_object'):
try:
obj = self.call_hook('decode_object', obj, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
obj = undefined
else:
state.stats.num_arrays += 1
if self.has_hook('decode_array'):
try:
obj = self.call_hook('decode_array', obj, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
obj = undefined
return obj
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4650-L4852
| 25 |
[
0,
1,
2,
3
] | 1.970443 |
[
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
19,
20,
21,
22,
24,
25,
26,
28,
29,
31,
32,
34,
35,
36,
37,
38,
39,
40,
41,
42,
44,
45,
49,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
67,
71,
72,
73,
74,
75,
76,
78,
79,
81,
82,
84,
85,
88,
90,
91,
93,
95,
96,
97,
98,
100,
101,
103,
105,
106,
108,
109,
113,
114,
116,
117,
119,
120,
121,
122,
123,
124,
125,
130,
134,
135,
136,
137,
140,
141,
143,
144,
145,
146,
147,
151,
152,
156,
157,
159,
160,
163,
164,
165,
167,
169,
170,
173,
174,
175,
178,
182,
183,
184,
185,
186,
187,
188,
189,
190,
191,
193,
194,
195,
196,
197,
198,
199,
200,
201,
202
] | 66.009852 | false | 14.825334 | 203 | 47 | 33.990148 | 1 |
def decode_composite(self, state):
if state.should_stop:
return None
buf = state.buf
self.skipws(state)
opener = buf.peek()
if opener not in '{[':
state.push_error('Composite data must start with "[" or "{"')
return None
start_position = buf.position
buf.skip()
if opener == '[':
isdict = False
closer = ']'
obj = []
else:
isdict = True
closer = '}'
if state.options.sort_keys == SORT_PRESERVE and _OrderedDict:
obj = _OrderedDict()
else:
obj = {}
num_items = 0
self.skipws(state)
c = buf.peek()
if c == closer:
# empty composite
buf.skip()
done = True
else:
saw_value = False # set to false at beginning and after commas
done = False
while not done and not buf.at_end and not state.should_stop:
self.skipws(state)
c = buf.peek()
if c == '':
                break  # will report error further down because done==False
elif c == ',':
if not saw_value:
# no preceding value, an elided (omitted) element
if isdict:
state.push_error('Can not omit elements of an object (dictionary)',
outer_position=start_position,
context='Object')
else:
state.push_cond(self.options.omitted_array_elements,
'Can not omit elements of an array (list)',
outer_position=start_position,
context='Array')
obj.append(undefined)
if state.stats:
state.stats.num_undefineds += 1
buf.skip() # skip over comma
saw_value = False
continue
elif c == closer:
if not saw_value:
if isdict:
state.push_cond(self.options.trailing_comma,
'Strict JSON does not allow a final comma in an object (dictionary) literal',
outer_position=start_position,
context='Object')
else:
state.push_cond(self.options.trailing_comma,
'Strict JSON does not allow a final comma in an array (list) literal',
outer_position=start_position,
context='Array')
buf.skip() # skip over closer
done = True
break
elif c in ']}':
if isdict:
cdesc = 'Object'
else:
cdesc = 'Array'
state.push_error("Expected a '%c' but saw '%c'" % (closer, c),
outer_position=start_position, context=cdesc)
done = True
break
if state.should_stop:
break
# Decode the item/value
value_position = buf.position
if isdict:
val = self.decodeobj(state, identifier_as_string=True)
else:
val = self.decodeobj(state, identifier_as_string=False)
if val is syntax_error:
recover_c = self.recover_parser(state)
if recover_c not in ':':
continue
if state.should_stop:
break
if saw_value:
# Two values without a separating comma
if isdict:
cdesc = 'Object'
else:
cdesc = 'Array'
state.push_error('Values must be separated by a comma',
position=value_position, outer_position=start_position,
context=cdesc)
saw_value = True
self.skipws(state)
if state.should_stop:
break
if isdict:
skip_item = False
key = val # Ref 11.1.5
key_position = value_position
if not helpers.isstringtype(key):
if helpers.isnumbertype(key):
state.push_cond(self.options.nonstring_keys,
'JSON only permits string literals as object properties (keys)',
position=key_position, outer_position=start_position,
context='Object')
else:
state.push_error(
'Object properties (keys) must be string literals, numbers, or identifiers',
position=key_position, outer_position=start_position,
context='Object')
skip_item = True
c = buf.peek()
if c != ':':
state.push_error('Missing value for object property, expected ":"',
position=value_position, outer_position=start_position,
context='Object')
buf.skip() # skip over colon
self.skipws(state)
rval = self.decodeobj(state)
self.skipws(state)
if not skip_item:
if key in obj:
state.push_cond(self.options.duplicate_keys,
'Object contains duplicate key',
key, position=key_position, outer_position=start_position,
context='Object')
if key == '':
state.push_cond(self.options.non_portable,
'Using an empty string "" as an object key may not be portable',
position=key_position, outer_position=start_position,
context='Object')
obj[key] = rval
num_items += 1
else: # islist
obj.append(val)
num_items += 1
# end while
if state.stats:
if isdict:
state.stats.max_items_in_object = max(state.stats.max_items_in_object, num_items)
else:
state.stats.max_items_in_array = max(state.stats.max_items_in_array, num_items)
if state.should_stop:
return obj
# Make sure composite value is properly terminated
if not done:
if isdict:
state.push_error('Object literal (dictionary) is not terminated',
outer_position=start_position, context='Object')
else:
state.push_error('Array literal (list) is not terminated',
outer_position=start_position, context='Array')
# Update stats and run hooks
if isdict:
state.stats.num_objects += 1
if self.has_hook('decode_object'):
try:
obj = self.call_hook('decode_object', obj, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
obj = undefined
else:
state.stats.num_arrays += 1
if self.has_hook('decode_array'):
try:
obj = self.call_hook('decode_array', obj, position=start_position)
except JSONSkipHook:
pass
except JSONError as err:
state.push_exception(err)
obj = undefined
return obj
| 18,548 |
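A sketch of the composite path, again via the decode() entry point shown later in this file (option defaults are assumptions):

from akshare.utils.demjson import JSON

j = JSON()
res = j.decode('{"nums": [1, 2, 3], "nested": {"ok": true}}', return_errors=True)
# res.object expected: {'nums': [1, 2, 3], 'nested': {'ok': True}}
# A trailing comma such as '[1, 2,]' trips the trailing_comma condition above:
# an error in strict mode, typically only a warning otherwise.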
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode_javascript_identifier
|
(self, name)
|
return name
|
Convert a JavaScript identifier into a Python string object.
This method can be overridden by a subclass to redefine how JavaScript
identifiers are turned into Python objects. By default this just
converts them into strings.
|
Convert a JavaScript identifier into a Python string object.
| 4,854 | 4,862 |
def decode_javascript_identifier(self, name):
"""Convert a JavaScript identifier into a Python string object.
    This method can be overridden by a subclass to redefine how JavaScript
identifiers are turned into Python objects. By default this just
converts them into strings.
"""
return name
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4854-L4862
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 88.888889 |
[
8
] | 11.111111 | false | 14.825334 | 9 | 1 | 88.888889 | 5 |
def decode_javascript_identifier(self, name):
return name
| 18,549 |
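A minimal sketch of the subclassing hook this docstring describes; the Identifier marker type is hypothetical, not part of demjson:

from akshare.utils.demjson import JSON

class Identifier(str):
    """Hypothetical marker type for bare JavaScript identifiers."""

class MyJSON(JSON):
    def decode_javascript_identifier(self, name):
        # Wrap unknown identifiers instead of returning plain strings
        return Identifier(name)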
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decodeobj
|
(self, state, identifier_as_string=False, at_document_start=False)
|
return obj
|
Intermediate-level JSON decoder.
Takes the parse state and returns the decoded Python object,
advancing the state's buffer past the value.
If no value can be decoded, None (or the syntax_error sentinel)
is returned instead.
|
Intermediate-level JSON decoder.
| 4,864 | 4,903 |
def decodeobj(self, state, identifier_as_string=False, at_document_start=False):
"""Intermediate-level JSON decoder.
    Takes the parse state and returns the decoded Python object,
    advancing the state's buffer past the value.
    If no value can be decoded, None (or the syntax_error sentinel)
    is returned instead.
"""
buf = state.buf
obj = None
self.skipws(state)
if buf.at_end:
state.push_error('Unexpected end of input')
c = buf.peek()
if c in '{[':
state.cur_depth += 1
try:
state.update_depth_stats()
obj = self.decode_composite(state)
finally:
state.cur_depth -= 1
else:
if at_document_start:
state.push_cond(self.options.any_type_at_start,
'JSON document must start with an object or array type only')
if c in self._string_quotes:
obj = self.decode_string(state)
elif c.isdigit() or c in '.+-':
obj = self.decode_number(state)
elif c.isalpha() or c in '_$':
obj = self.decode_identifier(state, identifier_as_string=identifier_as_string)
else:
state.push_error('Can not decode value starting with character %r' % c)
buf.skip()
self.recover_parser(state)
obj = syntax_error
return obj
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4864-L4903
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 25 |
[
10,
11,
12,
13,
14,
16,
17,
18,
19,
20,
21,
23,
25,
26,
28,
29,
30,
31,
32,
33,
35,
36,
37,
38,
39
] | 62.5 | false | 14.825334 | 40 | 9 | 37.5 | 7 |
def decodeobj(self, state, identifier_as_string=False, at_document_start=False):
buf = state.buf
obj = None
self.skipws(state)
if buf.at_end:
state.push_error('Unexpected end of input')
c = buf.peek()
if c in '{[':
state.cur_depth += 1
try:
state.update_depth_stats()
obj = self.decode_composite(state)
finally:
state.cur_depth -= 1
else:
if at_document_start:
state.push_cond(self.options.any_type_at_start,
'JSON document must start with an object or array type only')
if c in self._string_quotes:
obj = self.decode_string(state)
elif c.isdigit() or c in '.+-':
obj = self.decode_number(state)
elif c.isalpha() or c in '_$':
obj = self.decode_identifier(state, identifier_as_string=identifier_as_string)
else:
state.push_error('Can not decode value starting with character %r' % c)
buf.skip()
self.recover_parser(state)
obj = syntax_error
return obj
| 18,550 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.decode
|
(self, txt, encoding=None, return_errors=False, return_stats=False)
|
Decodes a JSON-encoded string into a Python object.
The 'return_errors' parameter controls what happens if the
input JSON has errors in it.
* False: the first error will be raised as a Python
exception. If there are no errors then the corresponding
Python object will be returned.
* True: the return value is always a (object, errors, stats) namedtuple (stats is None unless return_stats is set)
|
Decodes a JSON-encoded string into a Python object.
| 4,905 | 4,961 |
def decode(self, txt, encoding=None, return_errors=False, return_stats=False):
"""Decodes a JSON-encoded string into a Python object.
The 'return_errors' parameter controls what happens if the
input JSON has errors in it.
* False: the first error will be raised as a Python
exception. If there are no errors then the corresponding
Python object will be returned.
    * True: the return value is always a (object, errors, stats) namedtuple (stats is None unless return_stats is set)
"""
import sys
state = decode_state(options=self.options)
# Prepare the input
state.set_input(txt, encoding=encoding)
# Do the decoding
if not state.has_errors:
self.__sanity_check_start(state)
if not state.has_errors:
try:
self._do_decode(state) # DECODE!
except JSONException as err:
state.push_exception(err)
except Exception as err: # Mainly here to catch maximum recursion depth exceeded
e2 = sys.exc_info()
            raise
            # NOTE: the bare re-raise above makes the error-chaining below unreachable
            newerr = JSONDecodeError("An unexpected failure occurred", severity='fatal', position=state.buf.position)
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
state.push_exception(newerr)
if return_stats and state.buf:
state.stats.num_excess_whitespace = state.buf.num_ws_skipped
state.stats.total_chars = state.buf.position.char_position
# Handle the errors
result_type = _namedtuple('json_results', ['object', 'errors', 'stats'])
if return_errors:
if return_stats:
return result_type(state.obj, state.errors, state.stats)
else:
return result_type(state.obj, state.errors, None)
else:
# Don't cause warnings to raise an error
errors = [err for err in state.errors if err.severity in ('fatal', 'error')]
if errors:
raise errors[0]
if return_stats:
return result_type(state.obj, None, state.stats)
else:
return state.obj
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4905-L4961
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 22.807018 |
[
13,
14,
17,
20,
21,
23,
24,
25,
26,
27,
28,
29,
30,
36,
37,
38,
41,
43,
44,
45,
47,
50,
51,
52,
53,
54,
56
] | 47.368421 | false | 14.825334 | 57 | 12 | 52.631579 | 10 |
def decode(self, txt, encoding=None, return_errors=False, return_stats=False):
import sys
state = decode_state(options=self.options)
# Prepare the input
state.set_input(txt, encoding=encoding)
# Do the decoding
if not state.has_errors:
self.__sanity_check_start(state)
if not state.has_errors:
try:
self._do_decode(state) # DECODE!
except JSONException as err:
state.push_exception(err)
except Exception as err: # Mainly here to catch maximum recursion depth exceeded
e2 = sys.exc_info()
            raise
            # NOTE: the bare re-raise above makes the error-chaining below unreachable
            newerr = JSONDecodeError("An unexpected failure occurred", severity='fatal', position=state.buf.position)
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
state.push_exception(newerr)
if return_stats and state.buf:
state.stats.num_excess_whitespace = state.buf.num_ws_skipped
state.stats.total_chars = state.buf.position.char_position
# Handle the errors
result_type = _namedtuple('json_results', ['object', 'errors', 'stats'])
if return_errors:
if return_stats:
return result_type(state.obj, state.errors, state.stats)
else:
return result_type(state.obj, state.errors, None)
else:
# Don't cause warnings to raise an error
errors = [err for err in state.errors if err.severity in ('fatal', 'error')]
if errors:
raise errors[0]
if return_stats:
return result_type(state.obj, None, state.stats)
else:
return state.obj
| 18,551 |
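Putting the return shapes together (a sketch; only the import path is assumed, the field names come from the json_results namedtuple built above):

from akshare.utils.demjson import JSON

j = JSON()
j.decode('[1, 2, 3]')  # -> [1, 2, 3]; raises the first fatal/error condition, if any
res = j.decode('[1, 2, 3]', return_errors=True, return_stats=True)
# res.object -> [1, 2, 3]; res.errors -> list of collected JSONError objects;
# res.stats  -> decode statistics (None when return_stats is False)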
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.__sanity_check_start
|
(self, state)
|
return is_sane
|
Check that the document seems sane by looking at the first couple characters.
Check that the decoding seems sane. Per RFC 4627 section 3:
"Since the first two characters of a JSON text will
always be ASCII characters [RFC0020], ..."
[WAS removed from RFC 7158, but still valid via the grammar.]
This check is probably not necessary, but it allows us to
raise a suitably descriptive error rather than an obscure
syntax error later on.
Note that the RFC's requirement of two ASCII characters seems
to be incorrect, as a JSON string literal may have any Unicode
character as its first character. Thus the first
two characters will always be ASCII, unless the first
character is a quotation mark. And in non-strict mode we can
also have a few other characters too.
|
Check that the document seems sane by looking at the first couple characters.
| 4,963 | 5,003 |
def __sanity_check_start(self, state):
"""Check that the document seems sane by looking at the first couple characters.
Check that the decoding seems sane. Per RFC 4627 section 3:
"Since the first two characters of a JSON text will
always be ASCII characters [RFC0020], ..."
[WAS removed from RFC 7158, but still valid via the grammar.]
This check is probably not necessary, but it allows us to
raise a suitably descriptive error rather than an obscure
syntax error later on.
    Note that the RFC's requirement of two ASCII characters seems
    to be incorrect, as a JSON string literal may have any Unicode
    character as its first character. Thus the first
two characters will always be ASCII, unless the first
character is a quotation mark. And in non-strict mode we can
also have a few other characters too.
"""
is_sane = True
unitxt = state.buf.peekstr(2)
if len(unitxt) >= 2:
first, second = unitxt[:2]
if first in self._string_quotes:
pass # second can be anything inside string literal
else:
if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
(ord(second) < 0x20 or ord(second) > 0x7f)) and \
(not self.isws(first) and not self.isws(second)):
# Found non-printable ascii, must check unicode
# categories to see if the character is legal.
# Only whitespace, line and paragraph separators,
# and format control chars are legal here.
import unicodedata
catfirst = unicodedata.category(str(first))
catsecond = unicodedata.category(str(second))
if catfirst not in ('Zs', 'Zl', 'Zp', 'Cf') or \
catsecond not in ('Zs', 'Zl', 'Zp', 'Cf'):
state.push_fatal('The input is gibberish, is the Unicode encoding correct?')
return is_sane
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L4963-L5003
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19
] | 48.780488 |
[
20,
21,
22,
23,
24,
25,
27,
34,
35,
36,
37,
39,
40
] | 31.707317 | false | 14.825334 | 41 | 11 | 68.292683 | 17 |
def __sanity_check_start(self, state):
is_sane = True
unitxt = state.buf.peekstr(2)
if len(unitxt) >= 2:
first, second = unitxt[:2]
if first in self._string_quotes:
pass # second can be anything inside string literal
else:
if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
(ord(second) < 0x20 or ord(second) > 0x7f)) and \
(not self.isws(first) and not self.isws(second)):
# Found non-printable ascii, must check unicode
# categories to see if the character is legal.
# Only whitespace, line and paragraph separators,
# and format control chars are legal here.
import unicodedata
catfirst = unicodedata.category(str(first))
catsecond = unicodedata.category(str(second))
if catfirst not in ('Zs', 'Zl', 'Zp', 'Cf') or \
catsecond not in ('Zs', 'Zl', 'Zp', 'Cf'):
state.push_fatal('The input is gibberish, is the Unicode encoding correct?')
return is_sane
| 18,552 |
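The Unicode category whitelist used above can be reproduced with the standard library alone; this standalone snippet mirrors the ('Zs', 'Zl', 'Zp', 'Cf') test:

import unicodedata

for ch in (' ', '\u2028', '\u2029', '\ufeff', 'A'):
    print(repr(ch), unicodedata.category(ch))
# ' '      -> Zs (space separator)       '\u2028' -> Zl (line separator)
# '\u2029' -> Zp (paragraph separator)   '\ufeff' -> Cf (format control)
# 'A'      -> Lu; printable ASCII never reaches the category test above,
# since the ord() range check admits it first.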
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON._do_decode
|
(self, state)
|
This is the internal function that does the JSON decoding.
Called by the decode() method, after it has performed any Unicode decoding, etc.
|
This is the internal function that does the JSON decoding.
| 5,005 | 5,028 |
def _do_decode(self, state):
"""This is the internal function that does the JSON decoding.
Called by the decode() method, after it has performed any Unicode decoding, etc.
"""
buf = state.buf
self.skipws(state)
if buf.at_end:
state.push_error('No value to decode')
else:
if state.options.decimal_context:
dec_ctx = decimal.localcontext(state.options.decimal_context)
else:
dec_ctx = _dummy_context_manager
with dec_ctx:
state.obj = self.decodeobj(state, at_document_start=True)
if not state.should_stop:
# Make sure there's nothing at the end
self.skipws(state)
if not buf.at_end:
state.push_error('Unexpected text after end of JSON value')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5005-L5028
| 25 |
[
0,
1,
2,
3,
4
] | 20.833333 |
[
5,
6,
8,
9,
11,
12,
14,
16,
17,
19,
21,
22,
23
] | 54.166667 | false | 14.825334 | 24 | 6 | 45.833333 | 3 |
def _do_decode(self, state):
buf = state.buf
self.skipws(state)
if buf.at_end:
state.push_error('No value to decode')
else:
if state.options.decimal_context:
dec_ctx = decimal.localcontext(state.options.decimal_context)
else:
dec_ctx = _dummy_context_manager
with dec_ctx:
state.obj = self.decodeobj(state, at_document_start=True)
if not state.should_stop:
# Make sure there's nothing at the end
self.skipws(state)
if not buf.at_end:
state.push_error('Unexpected text after end of JSON value')
| 18,553 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON._classify_for_encoding
|
(self, obj)
|
return c
| 5,030 | 5,075 |
def _classify_for_encoding(self, obj):
import datetime
c = 'other'
if obj is None:
c = 'null'
elif obj is undefined:
c = 'undefined'
elif isinstance(obj, bool):
c = 'bool'
elif isinstance(obj, (int, float, complex)) or \
(decimal and isinstance(obj, decimal.Decimal)):
c = 'number'
elif isinstance(obj, str) or helpers.isstringtype(obj):
c = 'string'
else:
if isinstance(obj, dict):
c = 'dict'
elif isinstance(obj, tuple) and hasattr(obj, '_asdict') and callable(obj._asdict):
# Have a named tuple
enc_nt = self.options.encode_namedtuple_as_object
if enc_nt and (enc_nt is True or (callable(enc_nt) and enc_nt(obj))):
c = 'namedtuple'
else:
c = 'sequence'
elif isinstance(obj, (list, tuple, set, frozenset)):
c = 'sequence'
elif hasattr(obj, 'iterkeys') or (hasattr(obj, '__getitem__') and hasattr(obj, 'keys')):
c = 'dict'
elif isinstance(obj, datetime.datetime):
# Check datetime before date because it is a subclass!
c = 'datetime'
elif isinstance(obj, datetime.date):
c = 'date'
elif isinstance(obj, datetime.time):
c = 'time'
elif isinstance(obj, datetime.timedelta):
c = 'timedelta'
elif _py_major >= 3 and isinstance(obj, (bytes, bytearray)):
c = 'bytes'
elif _py_major >= 3 and isinstance(obj, memoryview):
c = 'memoryview'
elif _enum is not None and isinstance(obj, _enum):
c = 'enum'
else:
c = 'other'
return c
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5030-L5075
| 25 |
[
0
] | 2.173913 |
[
1,
2,
3,
4,
5,
6,
7,
8,
9,
11,
12,
13,
15,
16,
17,
19,
20,
21,
23,
24,
25,
26,
27,
28,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
44,
45
] | 84.782609 | false | 14.825334 | 46 | 31 | 15.217391 | 0 |
def _classify_for_encoding(self, obj):
import datetime
c = 'other'
if obj is None:
c = 'null'
elif obj is undefined:
c = 'undefined'
elif isinstance(obj, bool):
c = 'bool'
elif isinstance(obj, (int, float, complex)) or \
(decimal and isinstance(obj, decimal.Decimal)):
c = 'number'
elif isinstance(obj, str) or helpers.isstringtype(obj):
c = 'string'
else:
if isinstance(obj, dict):
c = 'dict'
elif isinstance(obj, tuple) and hasattr(obj, '_asdict') and callable(obj._asdict):
# Have a named tuple
enc_nt = self.options.encode_namedtuple_as_object
if enc_nt and (enc_nt is True or (callable(enc_nt) and enc_nt(obj))):
c = 'namedtuple'
else:
c = 'sequence'
elif isinstance(obj, (list, tuple, set, frozenset)):
c = 'sequence'
elif hasattr(obj, 'iterkeys') or (hasattr(obj, '__getitem__') and hasattr(obj, 'keys')):
c = 'dict'
elif isinstance(obj, datetime.datetime):
# Check datetime before date because it is a subclass!
c = 'datetime'
elif isinstance(obj, datetime.date):
c = 'date'
elif isinstance(obj, datetime.time):
c = 'time'
elif isinstance(obj, datetime.timedelta):
c = 'timedelta'
elif _py_major >= 3 and isinstance(obj, (bytes, bytearray)):
c = 'bytes'
elif _py_major >= 3 and isinstance(obj, memoryview):
c = 'memoryview'
elif _enum is not None and isinstance(obj, _enum):
c = 'enum'
else:
c = 'other'
return c
| 18,554 |
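Probing the classifier directly (it is a private helper, used here purely for illustration; the import path is assumed):

import datetime
from akshare.utils.demjson import JSON

j = JSON()
print(j._classify_for_encoding(None))                     # null
print(j._classify_for_encoding(True))                     # bool (bool is tested before numbers)
print(j._classify_for_encoding(3.14))                     # number
print(j._classify_for_encoding('hi'))                     # string
print(j._classify_for_encoding([1, 2]))                   # sequence
print(j._classify_for_encoding(datetime.datetime.now()))  # datetime (subclass, so tested before date)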
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode
|
(self, obj, encoding=None)
|
return output
|
Encodes the Python object into a JSON string representation.
This method will first attempt to encode an object by seeing
if it has a json_equivalent() method. If so then it will
call that method and then recursively attempt to encode
the object resulting from that call.
Next it will attempt to determine if the object is a native
type or acts like a sequence or dictionary. If so it will
encode that object directly.
Finally, if no other strategy for encoding the object of that
type exists, it will call the encode_default() method. That
method currently raises an error, but it could be overridden
by subclasses to provide a hook for extending the types which
can be encoded.
|
Encodes the Python object into a JSON string representation.
| 5,077 | 5,172 |
def encode(self, obj, encoding=None):
"""Encodes the Python object into a JSON string representation.
This method will first attempt to encode an object by seeing
    if it has a json_equivalent() method. If so then it will
    call that method and then recursively attempt to encode
    the object resulting from that call.
    Next it will attempt to determine if the object is a native
    type or acts like a sequence or dictionary. If so it will
encode that object directly.
Finally, if no other strategy for encoding the object of that
type exists, it will call the encode_default() method. That
method currently raises an error, but it could be overridden
by subclasses to provide a hook for extending the types which
can be encoded.
"""
import sys, codecs
# Make a fresh encoding state
state = encode_state(self.options)
# Find the codec to use. CodecInfo will be in 'cdk' and name in 'encoding'.
#
# Also set the state's 'escape_unicode_test' property which is used to
# determine what characters to \u-escape.
if encoding is None:
cdk = None
elif isinstance(encoding, codecs.CodecInfo):
cdk = encoding
encoding = cdk.name
else:
cdk = helpers.lookup_codec(encoding)
if not cdk:
raise JSONEncodeError('no codec available for character encoding', encoding)
if self.options.escape_unicode and callable(self.options.escape_unicode):
# User-supplied repertoire test function
state.escape_unicode_test = self.options.escape_unicode
else:
if self.options.escape_unicode == True or not cdk or cdk.name.lower() == 'ascii':
            # Forced escaping, ASCII, or an unknown codec -- \u escape anything not ASCII
state.escape_unicode_test = lambda c: ord(c) >= 0x80
elif cdk.name == 'iso8859-1':
state.escape_unicode_test = lambda c: ord(c) >= 0x100
elif cdk and cdk.name.lower().startswith('utf'):
# All UTF-x encodings can do the whole Unicode repertoire, so
# do nothing special.
state.escape_unicode_test = False
else:
# An unusual codec. We need to test every character
# to see if it is in the codec's repertoire to determine
# if we should \u escape that character.
enc_func = cdk.encode
def escape_unicode_hardway(c):
try:
enc_func(c)
except UnicodeEncodeError:
return True
else:
return False
state.escape_unicode_test = escape_unicode_hardway
# Make sure the encoding is not degenerate: it can encode the minimal
# number of characters needed by the JSON syntax rules.
if encoding is not None:
try:
output, nchars = cdk.encode(JSON.json_syntax_characters)
except UnicodeError as err:
raise JSONEncodeError("Output encoding %s is not sufficient to encode JSON" % cdk.name)
# Do the JSON encoding!
self._do_encode(obj, state)
if not self.options.encode_compactly:
state.append('\n')
unitxt = state.combine()
# Do the final Unicode encoding
if encoding is None:
output = unitxt
else:
try:
output, nchars = cdk.encode(unitxt)
except UnicodeEncodeError as err:
            # Re-raise as a JSONEncodeError
e2 = sys.exc_info()
newerr = JSONEncodeError("a Unicode encoding error occurred")
# Simulate Python 3's: "raise X from Y" exception chaining
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
raise newerr
return output
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5077-L5172
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18
] | 19.791667 |
[
19,
22,
28,
29,
30,
31,
32,
34,
35,
36,
38,
40,
42,
44,
45,
46,
47,
50,
55,
57,
58,
59,
60,
61,
63,
65,
69,
70,
71,
72,
73,
76,
77,
78,
79,
82,
83,
85,
86,
87,
89,
90,
92,
93,
94,
95
] | 47.916667 | false | 14.825334 | 96 | 19 | 52.083333 | 16 |
def encode(self, obj, encoding=None):
import sys, codecs
# Make a fresh encoding state
state = encode_state(self.options)
# Find the codec to use. CodecInfo will be in 'cdk' and name in 'encoding'.
#
# Also set the state's 'escape_unicode_test' property which is used to
# determine what characters to \u-escape.
if encoding is None:
cdk = None
elif isinstance(encoding, codecs.CodecInfo):
cdk = encoding
encoding = cdk.name
else:
cdk = helpers.lookup_codec(encoding)
if not cdk:
raise JSONEncodeError('no codec available for character encoding', encoding)
if self.options.escape_unicode and callable(self.options.escape_unicode):
# User-supplied repertoire test function
state.escape_unicode_test = self.options.escape_unicode
else:
if self.options.escape_unicode == True or not cdk or cdk.name.lower() == 'ascii':
            # Forced escaping, ASCII, or an unknown codec -- \u escape anything not ASCII
state.escape_unicode_test = lambda c: ord(c) >= 0x80
elif cdk.name == 'iso8859-1':
state.escape_unicode_test = lambda c: ord(c) >= 0x100
elif cdk and cdk.name.lower().startswith('utf'):
# All UTF-x encodings can do the whole Unicode repertoire, so
# do nothing special.
state.escape_unicode_test = False
else:
# An unusual codec. We need to test every character
# to see if it is in the codec's repertoire to determine
# if we should \u escape that character.
enc_func = cdk.encode
def escape_unicode_hardway(c):
try:
enc_func(c)
except UnicodeEncodeError:
return True
else:
return False
state.escape_unicode_test = escape_unicode_hardway
# Make sure the encoding is not degenerate: it can encode the minimal
# number of characters needed by the JSON syntax rules.
if encoding is not None:
try:
output, nchars = cdk.encode(JSON.json_syntax_characters)
except UnicodeError as err:
raise JSONEncodeError("Output encoding %s is not sufficient to encode JSON" % cdk.name)
# Do the JSON encoding!
self._do_encode(obj, state)
if not self.options.encode_compactly:
state.append('\n')
unitxt = state.combine()
# Do the final Unicode encoding
if encoding is None:
output = unitxt
else:
try:
output, nchars = cdk.encode(unitxt)
except UnicodeEncodeError as err:
            # Re-raise as a JSONEncodeError
e2 = sys.exc_info()
newerr = JSONEncodeError("a Unicode encoding error occurred")
# Simulate Python 3's: "raise X from Y" exception chaining
newerr.__cause__ = err
newerr.__traceback__ = e2[2]
raise newerr
return output
| 18,555 |
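A minimal sketch of the two output modes of encode(): text when encoding is None, codec-encoded bytes otherwise (import path assumed):

from akshare.utils.demjson import JSON

j = JSON()
text = j.encode({'a': 1, 'b': [True, None]})  # str, e.g. '{"a":1,"b":[true,null]}' modulo whitespace
data = j.encode({'a': 1}, encoding='utf-8')   # bytes, produced by the cdk.encode() path above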
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON._do_encode
|
(self, obj, state)
|
Internal encode function.
|
Internal encode function.
| 5,174 | 5,232 |
def _do_encode(self, obj, state):
"""Internal encode function."""
obj_classification = self._classify_for_encoding(obj)
if self.has_hook('encode_value'):
orig_obj = obj
try:
obj = self.call_hook('encode_value', obj)
except JSONSkipHook:
pass
if obj is not orig_obj:
prev_cls = obj_classification
obj_classification = self._classify_for_encoding(obj)
if obj_classification != prev_cls:
# Got a different type of object, re-encode again
self._do_encode(obj, state)
return
if hasattr(obj, 'json_equivalent'):
success = self.encode_equivalent(obj, state)
if success:
return
if obj_classification == 'null':
self.encode_null(state)
elif obj_classification == 'undefined':
if not self.options.is_forbid_undefined_values:
self.encode_undefined(state)
else:
raise JSONEncodeError('strict JSON does not permit "undefined" values')
elif obj_classification == 'bool':
self.encode_boolean(obj, state)
elif obj_classification == 'number':
try:
self.encode_number(obj, state)
except JSONEncodeError as err1:
# Bad number, probably a complex with non-zero imaginary part.
# Let the default encoders take a shot at encoding.
try:
self.try_encode_default(obj, state)
except Exception as err2:
# Default handlers couldn't deal with it, re-raise original exception.
raise err1
elif obj_classification == 'string':
self.encode_string(obj, state)
elif obj_classification == 'enum': # Python 3.4 enum.Enum
self.encode_enum(obj, state)
elif obj_classification == 'datetime': # Python datetime.datetime
self.encode_datetime(obj, state)
elif obj_classification == 'date': # Python datetime.date
self.encode_date(obj, state)
elif obj_classification == 'time': # Python datetime.time
self.encode_time(obj, state)
    elif obj_classification == 'timedelta':  # Python datetime.timedelta
        self.encode_timedelta(obj, state)
    else:
        # Anything left is probably composite, or an unconvertible type.
self.encode_composite(obj, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5174-L5232
| 25 |
[
0,
1
] | 3.389831 |
[
2,
4,
5,
6,
7,
8,
9,
11,
12,
13,
14,
16,
17,
19,
20,
21,
22,
24,
25,
26,
27,
28,
30,
31,
32,
33,
34,
35,
36,
39,
40,
41,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
58
] | 77.966102 | false | 14.825334 | 59 | 20 | 22.033898 | 1 |
def _do_encode(self, obj, state):
obj_classification = self._classify_for_encoding(obj)
if self.has_hook('encode_value'):
orig_obj = obj
try:
obj = self.call_hook('encode_value', obj)
except JSONSkipHook:
pass
if obj is not orig_obj:
prev_cls = obj_classification
obj_classification = self._classify_for_encoding(obj)
if obj_classification != prev_cls:
# Got a different type of object, re-encode again
self._do_encode(obj, state)
return
if hasattr(obj, 'json_equivalent'):
success = self.encode_equivalent(obj, state)
if success:
return
if obj_classification == 'null':
self.encode_null(state)
elif obj_classification == 'undefined':
if not self.options.is_forbid_undefined_values:
self.encode_undefined(state)
else:
raise JSONEncodeError('strict JSON does not permit "undefined" values')
elif obj_classification == 'bool':
self.encode_boolean(obj, state)
elif obj_classification == 'number':
try:
self.encode_number(obj, state)
except JSONEncodeError as err1:
# Bad number, probably a complex with non-zero imaginary part.
# Let the default encoders take a shot at encoding.
try:
self.try_encode_default(obj, state)
except Exception as err2:
# Default handlers couldn't deal with it, re-raise original exception.
raise err1
elif obj_classification == 'string':
self.encode_string(obj, state)
elif obj_classification == 'enum': # Python 3.4 enum.Enum
self.encode_enum(obj, state)
elif obj_classification == 'datetime': # Python datetime.datetime
self.encode_datetime(obj, state)
elif obj_classification == 'date': # Python datetime.date
self.encode_date(obj, state)
elif obj_classification == 'time': # Python datetime.time
self.encode_time(obj, state)
    elif obj_classification == 'timedelta':  # Python datetime.timedelta
        self.encode_timedelta(obj, state)
    else:
        # Anything left is probably composite, or an unconvertible type.
self.encode_composite(obj, state)
| 18,556 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_enum
|
(self, val, state)
|
Encode a Python Enum value into JSON.
|
Encode a Python Enum value into JSON.
| 5,234 | 5,242 |
def encode_enum(self, val, state):
"""Encode a Python Enum value into JSON."""
eas = self.options.encode_enum_as
if eas == 'qname':
self.encode_string(str(val), state)
elif eas == 'value':
self._do_encode(val.value, state)
else: # eas == 'name'
self.encode_string(val.name, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5234-L5242
| 25 |
[
0,
1
] | 22.222222 |
[
2,
3,
4,
5,
6,
8
] | 66.666667 | false | 14.825334 | 9 | 3 | 33.333333 | 1 |
def encode_enum(self, val, state):
eas = self.options.encode_enum_as
if eas == 'qname':
self.encode_string(str(val), state)
elif eas == 'value':
self._do_encode(val.value, state)
else: # eas == 'name'
self.encode_string(val.name, state)
| 18,557 |
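A sketch of the three encode_enum_as modes; that the option can be assigned as a plain attribute is an assumption inferred from the self.options.encode_enum_as read above:

import enum
from akshare.utils.demjson import JSON

class Color(enum.Enum):
    RED = 1

j = JSON()
j.options.encode_enum_as = 'name'   # expected output: "RED"
j.options.encode_enum_as = 'qname'  # expected output: "Color.RED" (str(val))
j.options.encode_enum_as = 'value'  # expected output: 1 (the member value, re-encoded)
print(j.encode(Color.RED))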
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_date
|
(self, dt, state)
| 5,244 | 5,248 |
def encode_date(self, dt, state):
fmt = self.options.date_format
if not fmt or fmt == 'iso':
fmt = '%Y-%m-%d'
self.encode_string(dt.strftime(fmt), state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5244-L5248
| 25 |
[
0
] | 20 |
[
1,
2,
3,
4
] | 80 | false | 14.825334 | 5 | 3 | 20 | 0 |
def encode_date(self, dt, state):
fmt = self.options.date_format
if not fmt or fmt == 'iso':
fmt = '%Y-%m-%d'
self.encode_string(dt.strftime(fmt), state)
| 18,558 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_datetime
|
(self, dt, state)
| 5,250 | 5,261 |
def encode_datetime(self, dt, state):
fmt = self.options.datetime_format
is_iso = not fmt or fmt == 'iso'
if is_iso:
if dt.microsecond == 0:
fmt = '%Y-%m-%dT%H:%M:%S%z'
else:
fmt = '%Y-%m-%dT%H:%M:%S.%f%z'
s = dt.strftime(fmt)
    if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):  # parenthesized: 'and' binds tighter than 'or'
s = s[:-6] + 'Z' # Change UTC to use 'Z' notation
self.encode_string(s, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5250-L5261
| 25 |
[
0
] | 8.333333 |
[
1,
2,
3,
4,
5,
7,
8,
9,
10,
11
] | 83.333333 | false | 14.825334 | 12 | 7 | 16.666667 | 0 |
def encode_datetime(self, dt, state):
fmt = self.options.datetime_format
is_iso = not fmt or fmt == 'iso'
if is_iso:
if dt.microsecond == 0:
fmt = '%Y-%m-%dT%H:%M:%S%z'
else:
fmt = '%Y-%m-%dT%H:%M:%S.%f%z'
s = dt.strftime(fmt)
    if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):  # parenthesized: 'and' binds tighter than 'or'
s = s[:-6] + 'Z' # Change UTC to use 'Z' notation
self.encode_string(s, state)
| 18,559 |
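The ISO path reproduced with the standard library alone, including a portability wrinkle worth noting:

import datetime

utc = datetime.timezone.utc
dt = datetime.datetime(2023, 5, 1, 12, 30, 0, tzinfo=utc)
print(dt.strftime('%Y-%m-%dT%H:%M:%S%z'))  # 2023-05-01T12:30:00+0000
# CPython's %z emits '+0000' with no colon, so the endswith('+00:00') check
# above only matches tzinfo/strftime combinations that include the colon.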
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_time
|
(self, dt, state)
| 5,263 | 5,274 |
def encode_time(self, dt, state):
fmt = self.options.datetime_format
is_iso = not fmt or fmt == 'iso'
if is_iso:
if dt.microsecond == 0:
fmt = 'T%H:%M:%S%z'
else:
fmt = 'T%H:%M:%S.%f%z'
s = dt.strftime(fmt)
    if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):  # parenthesized: 'and' binds tighter than 'or'
s = s[:-6] + 'Z' # Change UTC to use 'Z' notation
self.encode_string(s, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5263-L5274
| 25 |
[
0
] | 8.333333 |
[
1,
2,
3,
4,
5,
7,
8,
9,
10,
11
] | 83.333333 | false | 14.825334 | 12 | 7 | 16.666667 | 0 |
def encode_time(self, dt, state):
fmt = self.options.datetime_format
is_iso = not fmt or fmt == 'iso'
if is_iso:
if dt.microsecond == 0:
fmt = 'T%H:%M:%S%z'
else:
fmt = 'T%H:%M:%S.%f%z'
s = dt.strftime(fmt)
    if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):  # parenthesized: 'and' binds tighter than 'or'
s = s[:-6] + 'Z' # Change UTC to use 'Z' notation
self.encode_string(s, state)
| 18,560 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_timedelta
|
(self, td, state)
| 5,276 | 5,284 |
def encode_timedelta(self, td, state):
fmt = self.options.timedelta_format
if not fmt or fmt == 'iso':
s = helpers.format_timedelta_iso(td)
elif fmt == 'hms':
s = str(td)
else:
raise ValueError("Unknown timedelta_format %r" % fmt)
self.encode_string(s, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5276-L5284
| 25 |
[
0
] | 11.111111 |
[
1,
2,
3,
4,
5,
7,
8
] | 77.777778 | false | 14.825334 | 9 | 4 | 22.222222 | 0 |
def encode_timedelta(self, td, state):
fmt = self.options.timedelta_format
if not fmt or fmt == 'iso':
s = helpers.format_timedelta_iso(td)
elif fmt == 'hms':
s = str(td)
else:
raise ValueError("Unknown timedelta_format %r" % fmt)
self.encode_string(s, state)
| 18,561 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_composite
|
(self, obj, state, obj_classification=None)
|
Encodes just composite objects: dictionaries, lists, or sequences.
Basically handles any Python type for which iter() can create
an iterator object.
This method is not intended to be called directly. Use the
encode() method instead.
|
Encodes just composite objects: dictionaries, lists, or sequences.
| 5,286 | 5,484 |
def encode_composite(self, obj, state, obj_classification=None):
"""Encodes just composite objects: dictionaries, lists, or sequences.
    Basically handles any Python type for which iter() can create
an iterator object.
This method is not intended to be called directly. Use the
encode() method instead.
"""
import sys
if not obj_classification:
obj_classification = self._classify_for_encoding(obj)
# Convert namedtuples to dictionaries
if obj_classification == 'namedtuple':
obj = obj._asdict()
obj_classification = 'dict'
# Convert 'unsigned byte' memory views into plain bytes
if obj_classification == 'memoryview' and obj.format == 'B':
obj = obj.tobytes()
obj_classification = 'bytes'
# Run hooks
hook_name = None
if obj_classification == 'dict':
hook_name = 'encode_dict'
elif obj_classification == 'sequence':
hook_name = 'encode_sequence'
elif obj_classification == 'bytes':
hook_name = 'encode_bytes'
if self.has_hook(hook_name):
try:
new_obj = self.call_hook(hook_name, obj)
except JSONSkipHook:
pass
else:
if new_obj is not obj:
obj = new_obj
prev_cls = obj_classification
obj_classification = self._classify_for_encoding(obj)
if obj_classification != prev_cls:
# Transformed to a different kind of object, call
# back to the general encode() method.
self._do_encode(obj, state)
return
# Else, fall through
    # At this point we have decided whether we are dealing with an object or an array
isdict = (obj_classification == 'dict')
# Get iterator
it = None
if isdict and hasattr(obj, 'iterkeys'):
try:
it = iter(obj.keys())
except AttributeError:
pass
else:
try:
it = iter(obj)
except TypeError:
pass
# Convert each member to JSON
if it is not None:
# Try to get length, but don't fail if we can't
try:
numitems = len(obj)
except TypeError:
numitems = 0
# Output the opening bracket or brace
compactly = self.options.encode_compactly
if not compactly:
indent0 = self.options.indentation_for_level(state.nest_level)
indent = self.options.indentation_for_level(state.nest_level + 1)
spaces_after_opener = ''
if isdict:
opener = '{'
closer = '}'
if compactly:
dictcolon = ':'
else:
dictcolon = ' : '
else:
opener = '['
closer = ']'
if not compactly:
# opener = opener + ' '
spaces_after_opener = self.options.spaces_to_next_indent_level(subtract=len(opener))
state.append(opener)
state.append(spaces_after_opener)
# Now iterate through all the items and collect their representations
parts = [] # Collects each of the members
part_keys = [] # For dictionary key sorting, tuples (key,index)
try: # while not StopIteration
part_idx = 0
while True:
obj2 = next(it)
part_idx += 1 # Note, will start counting at 1
if obj2 is obj:
raise JSONEncodeError('trying to encode an infinite sequence', obj)
if isdict:
obj3 = obj[obj2]
# Dictionary key is in obj2 and value in obj3.
# Let any hooks transform the key.
if self.has_hook('encode_value'):
try:
newobj = self.call_hook('encode_value', obj2)
except JSONSkipHook:
pass
else:
obj2 = newobj
if self.has_hook('encode_dict_key'):
try:
newkey = self.call_hook('encode_dict_key', obj2)
except JSONSkipHook:
pass
else:
obj2 = newkey
# Check JSON restrictions on key types
if not helpers.isstringtype(obj2):
if helpers.isnumbertype(obj2):
if not self.options.is_allow_nonstring_keys:
raise JSONEncodeError(
'object properties (dictionary keys) must be strings in strict JSON', obj2)
else:
raise JSONEncodeError(
'object properties (dictionary keys) can only be strings or numbers in ECMAScript',
obj2)
part_keys.append((obj2, part_idx - 1))
# Encode this item in the sequence and put into item_chunks
substate = state.make_substate()
self._do_encode(obj2, substate)
if isdict:
substate.append(dictcolon)
substate2 = substate.make_substate()
self._do_encode(obj3, substate2)
substate.join_substate(substate2)
parts.append(substate)
# Next item iteration
except StopIteration:
pass
# Sort dictionary keys
if isdict:
srt = self.options.sort_keys
if srt == SORT_PRESERVE:
if _OrderedDict and isinstance(obj, _OrderedDict):
srt = SORT_NONE # Will keep order
else:
srt = SORT_SMART
if not srt or srt in (SORT_NONE, SORT_PRESERVE):
srt = None
elif callable(srt):
part_keys.sort(key=(lambda t: (srt(t[0]), t[0])))
elif srt == SORT_SMART:
part_keys.sort(key=(lambda t: (smart_sort_transform(t[0]), t[0])))
elif srt == SORT_ALPHA_CI:
part_keys.sort(key=(lambda t: (str(t[0]).upper(), t[0])))
elif srt or srt == SORT_ALPHA:
part_keys.sort(key=(lambda t: str(t[0])))
# Now make parts match the new sort order
if srt is not None:
parts = [parts[pk[1]] for pk in part_keys]
if compactly:
sep = ','
elif len(parts) <= self.options.max_items_per_line:
sep = ', '
else:
# state.append(spaces_after_opener)
state.append('\n' + indent)
sep = ',\n' + indent
for pnum, substate in enumerate(parts):
if pnum > 0:
state.append(sep)
state.join_substate(substate)
if not compactly:
if numitems > self.options.max_items_per_line:
state.append('\n' + indent0)
else:
state.append(' ')
state.append(closer) # final '}' or ']'
else: # Can't create an iterator for the object
self.try_encode_default(obj, state)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5286-L5484
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 5.025126 |
[
10,
11,
12,
15,
16,
17,
20,
21,
22,
25,
26,
27,
28,
29,
30,
31,
33,
34,
35,
36,
37,
39,
40,
41,
42,
43,
46,
47,
51,
54,
55,
56,
57,
58,
59,
61,
62,
63,
64,
67,
69,
70,
71,
72,
75,
76,
77,
78,
80,
81,
82,
83,
84,
85,
87,
89,
90,
91,
93,
95,
96,
99,
100,
102,
103,
104,
105,
106,
107,
108,
109,
110,
114,
115,
116,
117,
118,
120,
121,
122,
123,
124,
125,
127,
130,
131,
132,
133,
136,
139,
142,
143,
144,
145,
146,
147,
148,
149,
151,
152,
155,
156,
157,
158,
159,
161,
163,
164,
165,
166,
167,
168,
169,
170,
171,
172,
174,
175,
177,
178,
179,
180,
183,
184,
186,
187,
188,
189,
191,
192,
193,
195,
196,
198
] | 67.336683 | false | 14.825334 | 199 | 53 | 32.663317 | 7 |
def encode_composite(self, obj, state, obj_classification=None):
import sys
if not obj_classification:
obj_classification = self._classify_for_encoding(obj)
# Convert namedtuples to dictionaries
if obj_classification == 'namedtuple':
obj = obj._asdict()
obj_classification = 'dict'
# Convert 'unsigned byte' memory views into plain bytes
if obj_classification == 'memoryview' and obj.format == 'B':
obj = obj.tobytes()
obj_classification = 'bytes'
# Run hooks
hook_name = None
if obj_classification == 'dict':
hook_name = 'encode_dict'
elif obj_classification == 'sequence':
hook_name = 'encode_sequence'
elif obj_classification == 'bytes':
hook_name = 'encode_bytes'
if self.has_hook(hook_name):
try:
new_obj = self.call_hook(hook_name, obj)
except JSONSkipHook:
pass
else:
if new_obj is not obj:
obj = new_obj
prev_cls = obj_classification
obj_classification = self._classify_for_encoding(obj)
if obj_classification != prev_cls:
# Transformed to a different kind of object, call
# back to the general encode() method.
self._do_encode(obj, state)
return
# Else, fall through
    # At this point we have decided whether we are dealing with an object or an array
isdict = (obj_classification == 'dict')
# Get iterator
it = None
if isdict and hasattr(obj, 'iterkeys'):
try:
it = iter(obj.keys())
except AttributeError:
pass
else:
try:
it = iter(obj)
except TypeError:
pass
# Convert each member to JSON
if it is not None:
# Try to get length, but don't fail if we can't
try:
numitems = len(obj)
except TypeError:
numitems = 0
# Output the opening bracket or brace
compactly = self.options.encode_compactly
if not compactly:
indent0 = self.options.indentation_for_level(state.nest_level)
indent = self.options.indentation_for_level(state.nest_level + 1)
spaces_after_opener = ''
if isdict:
opener = '{'
closer = '}'
if compactly:
dictcolon = ':'
else:
dictcolon = ' : '
else:
opener = '['
closer = ']'
if not compactly:
# opener = opener + ' '
spaces_after_opener = self.options.spaces_to_next_indent_level(subtract=len(opener))
state.append(opener)
state.append(spaces_after_opener)
# Now iterate through all the items and collect their representations
parts = [] # Collects each of the members
part_keys = [] # For dictionary key sorting, tuples (key,index)
try: # while not StopIteration
part_idx = 0
while True:
obj2 = next(it)
part_idx += 1 # Note, will start counting at 1
if obj2 is obj:
raise JSONEncodeError('trying to encode an infinite sequence', obj)
if isdict:
obj3 = obj[obj2]
# Dictionary key is in obj2 and value in obj3.
# Let any hooks transform the key.
if self.has_hook('encode_value'):
try:
newobj = self.call_hook('encode_value', obj2)
except JSONSkipHook:
pass
else:
obj2 = newobj
if self.has_hook('encode_dict_key'):
try:
newkey = self.call_hook('encode_dict_key', obj2)
except JSONSkipHook:
pass
else:
obj2 = newkey
# Check JSON restrictions on key types
if not helpers.isstringtype(obj2):
if helpers.isnumbertype(obj2):
if not self.options.is_allow_nonstring_keys:
raise JSONEncodeError(
'object properties (dictionary keys) must be strings in strict JSON', obj2)
else:
raise JSONEncodeError(
'object properties (dictionary keys) can only be strings or numbers in ECMAScript',
obj2)
part_keys.append((obj2, part_idx - 1))
# Encode this item in the sequence and put into item_chunks
substate = state.make_substate()
self._do_encode(obj2, substate)
if isdict:
substate.append(dictcolon)
substate2 = substate.make_substate()
self._do_encode(obj3, substate2)
substate.join_substate(substate2)
parts.append(substate)
# Next item iteration
except StopIteration:
pass
# Sort dictionary keys
if isdict:
srt = self.options.sort_keys
if srt == SORT_PRESERVE:
if _OrderedDict and isinstance(obj, _OrderedDict):
srt = SORT_NONE # Will keep order
else:
srt = SORT_SMART
if not srt or srt in (SORT_NONE, SORT_PRESERVE):
srt = None
elif callable(srt):
part_keys.sort(key=(lambda t: (srt(t[0]), t[0])))
elif srt == SORT_SMART:
part_keys.sort(key=(lambda t: (smart_sort_transform(t[0]), t[0])))
elif srt == SORT_ALPHA_CI:
part_keys.sort(key=(lambda t: (str(t[0]).upper(), t[0])))
elif srt or srt == SORT_ALPHA:
part_keys.sort(key=(lambda t: str(t[0])))
# Now make parts match the new sort order
if srt is not None:
parts = [parts[pk[1]] for pk in part_keys]
if compactly:
sep = ','
elif len(parts) <= self.options.max_items_per_line:
sep = ', '
else:
# state.append(spaces_after_opener)
state.append('\n' + indent)
sep = ',\n' + indent
for pnum, substate in enumerate(parts):
if pnum > 0:
state.append(sep)
state.join_substate(substate)
if not compactly:
if numitems > self.options.max_items_per_line:
state.append('\n' + indent0)
else:
state.append(' ')
state.append(closer) # final '}' or ']'
else: # Can't create an iterator for the object
self.try_encode_default(obj, state)
| 18,562 |
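The dict handling in encode_composite() is easiest to observe end to end through the module-level encode() entry point, which the jsonlint code further below also uses. A minimal sketch, assuming the vendored module imports as akshare.utils.demjson and using only names that appear elsewhere in this file (encode, json_options, sort_keys, encode_compactly, SORT_ALPHA); the input dict is made up:

# Sketch: alphabetic key sorting plus compact output via encode_composite.
from akshare.utils import demjson

opts = demjson.json_options(sort_keys=demjson.SORT_ALPHA, encode_compactly=True)
print(demjson.encode({"b": 1, "a": [2, 3]}, json_options=opts))
# Keys come out alphabetized: {"a":[2,3],"b":1}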
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.encode_equivalent
|
(self, obj, state)
|
This method is used to encode user-defined class objects.
The object being encoded should have a json_equivalent()
method defined which returns another equivalent object which
is easily JSON-encoded. If the object in question has no
json_equivalent() method available then None is returned
instead of a string so that the encoding will attempt the next
strategy.
If a caller wishes to disable the calling of json_equivalent()
methods, then subclass this class and override this method
to just return None.
|
This method is used to encode user-defined class objects.
| 5,486 | 5,510 |
def encode_equivalent(self, obj, state):
"""This method is used to encode user-defined class objects.
The object being encoded should have a json_equivalent()
method defined which returns another equivalent object which
is easily JSON-encoded. If the object in question has no
json_equivalent() method available then None is returned
instead of a string so that the encoding will attempt the next
strategy.
If a caller wishes to disable the calling of json_equivalent()
methods, then subclass this class and override this method
to just return None.
"""
if hasattr(obj, 'json_equivalent') \
and callable(getattr(obj, 'json_equivalent')):
obj2 = obj.json_equivalent()
if obj2 is obj:
# Try to prevent careless infinite recursion
raise JSONEncodeError('object has a json_equivalent() method that returns itself', obj)
self._do_encode(obj2, state)
return True
else:
return False
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5486-L5510
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 60 |
[
15,
17,
18,
20,
21,
22,
24
] | 28 | false | 14.825334 | 25 | 4 | 72 | 12 |
def encode_equivalent(self, obj, state):
if hasattr(obj, 'json_equivalent') \
and callable(getattr(obj, 'json_equivalent')):
obj2 = obj.json_equivalent()
if obj2 is obj:
# Try to prevent careless infinite recursion
raise JSONEncodeError('object has a json_equivalent() method that returns itself', obj)
self._do_encode(obj2, state)
return True
else:
return False
| 18,563 |
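The json_equivalent() protocol is easiest to see with a small class. A hedged sketch; the Point class and its fields are invented for illustration, and only the protocol itself comes from the method above:

# Sketch: a user-defined class that opts into encoding via json_equivalent().
from akshare.utils import demjson

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def json_equivalent(self):
        # Return something demjson already knows how to encode.
        return {"x": self.x, "y": self.y}

print(demjson.encode(Point(1, 2)))  # e.g. {"x":1,"y":2}; exact whitespace depends on options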
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
JSON.try_encode_default
|
(self, obj, state)
| 5,512 | 5,525 |
def try_encode_default(self, obj, state):
orig_obj = obj
if self.has_hook('encode_default'):
try:
obj = self.call_hook('encode_default', obj)
except JSONSkipHook:
pass
else:
if obj is not orig_obj:
# Hook made a transformation, re-encode it
return self._do_encode(obj, state)
# End of the road.
raise JSONEncodeError('can not encode object into a JSON representation', obj)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5512-L5525
| 25 |
[
0
] | 7.142857 |
[
1,
2,
3,
4,
5,
6,
8,
10,
13
] | 64.285714 | false | 14.825334 | 14 | 4 | 35.714286 | 0 |
def try_encode_default(self, obj, state):
orig_obj = obj
if self.has_hook('encode_default'):
try:
obj = self.call_hook('encode_default', obj)
except JSONSkipHook:
pass
else:
if obj is not orig_obj:
# Hook made a transformation, re-encode it
return self._do_encode(obj, state)
# End of the road.
raise JSONEncodeError('can not encode object into a JSON representation', obj)
| 18,564 |
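try_encode_default() is the encoder's last-chance path, driven by an 'encode_default' hook. A sketch of registering one; the hook name is confirmed by the method above, while set_hook() as the registration mechanism is an assumption carried over from upstream demjson:

# Sketch: a last-resort fallback that stringifies otherwise unencodable objects.
# set_hook(name, fn) is assumed to exist on the JSON class, as in upstream demjson.
from akshare.utils import demjson

j = demjson.JSON()
j.set_hook('encode_default', lambda obj: str(obj))
print(j.encode(1 + 2j))  # complex is not JSON; the hook turns it into "(1+2j)"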
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
jsonlint.__init__
|
(self, program_name='jsonlint', stdin=None, stdout=None, stderr=None)
|
Create an instance of a "jsonlint" program.
You can optionally pass options to define the program's environment:
* program_name - the name of the program, usually sys.argv[0]
* stdin - the file object to use for input, default sys.stdin
* stdout - the file object to use for output, default sys.stdout
* stderr - the file object to use for error output, default sys.stderr
After creating an instance, you typically call the main() method.
|
Create an instance of a "jsonlint" program.
| 5,926 | 5,955 |
def __init__(self, program_name='jsonlint', stdin=None, stdout=None, stderr=None):
"""Create an instance of a "jsonlint" program.
You can optionally pass options to define the program's environment:
* program_name - the name of the program, usually sys.argv[0]
* stdin - the file object to use for input, default sys.stdin
* stdout - the file object to use for output, default sys.stdout
* stderr - the file object to use for error output, default sys.stderr
After creating an instance, you typically call the main() method.
"""
import os, sys
self.program_path = program_name
self.program_name = os.path.basename(program_name)
if stdin:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout:
self.stdout = stdout
else:
self.stdout = sys.stdout
if stderr:
self.stderr = stderr
else:
self.stderr = sys.stderr
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5926-L5955
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
] | 43.333333 |
[
13,
14,
15,
16,
17,
19,
21,
22,
24,
26,
27,
29
] | 40 | false | 14.825334 | 30 | 4 | 60 | 10 |
def __init__(self, program_name='jsonlint', stdin=None, stdout=None, stderr=None):
import os, sys
self.program_path = program_name
self.program_name = os.path.basename(program_name)
if stdin:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout:
self.stdout = stdout
else:
self.stdout = sys.stdout
if stderr:
self.stderr = stderr
else:
self.stderr = sys.stderr
| 18,565 |
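Typical wiring for the class, per the docstring ("After creating an instance, you typically call the main() method"):

# Sketch: using the jsonlint class as a script entry point.
import sys
from akshare.utils.demjson import jsonlint

lint = jsonlint(program_name=sys.argv[0])
sys.exit(lint.main(sys.argv[1:]))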
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
jsonlint.usage
|
(self)
|
return self._jsonlint_usage % {'program_name': self.program_name,
'homepage': __homepage__,
'sort_options_help': sorthelp}
|
A multi-line string containing the program usage instructions.
|
A multi-line string containing the program usage instructions.
| 5,958 | 5,966 |
def usage(self):
"""A multi-line string containing the program usage instructions.
"""
sorthelp = '\n'.join([
" %12s - %s" % (sm, sd)
for sm, sd in sorted(sorting_methods.items()) if sm != SORT_NONE])
return self._jsonlint_usage % {'program_name': self.program_name,
'homepage': __homepage__,
'sort_options_help': sorthelp}
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5958-L5966
| 25 |
[
0,
1,
2
] | 33.333333 |
[
3,
6
] | 22.222222 | false | 14.825334 | 9 | 2 | 77.777778 | 1 |
def usage(self):
sorthelp = '\n'.join([
" %12s - %s" % (sm, sd)
for sm, sd in sorted(sorting_methods.items()) if sm != SORT_NONE])
return self._jsonlint_usage % {'program_name': self.program_name,
'homepage': __homepage__,
'sort_options_help': sorthelp}
| 18,566 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
jsonlint._lintcheck_data
|
(self,
jsondata,
verbose_fp=None,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
pfx='',
jsonopts=None)
|
return (success, reformatted)
| 5,968 | 6,019 |
def _lintcheck_data(self,
jsondata,
verbose_fp=None,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
pfx='',
jsonopts=None):
global decode, encode
success = self.SUCCESS_FAIL
reformatted = None
if show_stats:
stats_fp = verbose_fp
else:
stats_fp = None
try:
results = decode(jsondata, encoding=input_encoding,
return_errors=True,
return_stats=True,
write_errors=verbose_fp,
write_stats=stats_fp,
filename_for_errors=pfx,
json_options=jsonopts)
except JSONError as err:
success = self.SUCCESS_FAIL
if verbose_fp:
verbose_fp.write('%s%s\n' % (pfx, err.pretty_description()))
except Exception as err:
success = self.SUCCESS_FAIL
if verbose_fp:
verbose_fp.write('%s%s\n' % (pfx, str(err)))
else:
errors = [err for err in results.errors if err.severity in ('fatal', 'error')]
warnings = [err for err in results.errors if err.severity in ('warning',)]
if errors:
success = self.SUCCESS_FAIL
elif warnings:
success = self.SUCCESS_WARNING
else:
success = self.SUCCESS_OK
if reformat:
encopts = jsonopts.copy()
encopts.strictness = STRICTNESS_TOLERANT
if reformat == 'compactly':
encopts.encode_compactly = True
else:
encopts.encode_compactly = False
reformatted = encode(results.object, encoding=output_encoding, json_options=encopts)
return (success, reformatted)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L5968-L6019
| 25 |
[
0
] | 1.923077 |
[
9,
10,
11,
12,
14,
15,
16,
23,
24,
25,
26,
27,
28,
29,
30,
32,
33,
34,
35,
36,
37,
39,
41,
42,
43,
44,
45,
47,
49,
51
] | 57.692308 | false | 14.825334 | 52 | 12 | 42.307692 | 0 |
def _lintcheck_data(self,
jsondata,
verbose_fp=None,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
pfx='',
jsonopts=None):
global decode, encode
success = self.SUCCESS_FAIL
reformatted = None
if show_stats:
stats_fp = verbose_fp
else:
stats_fp = None
try:
results = decode(jsondata, encoding=input_encoding,
return_errors=True,
return_stats=True,
write_errors=verbose_fp,
write_stats=stats_fp,
filename_for_errors=pfx,
json_options=jsonopts)
except JSONError as err:
success = self.SUCCESS_FAIL
if verbose_fp:
verbose_fp.write('%s%s\n' % (pfx, err.pretty_description()))
except Exception as err:
success = self.SUCCESS_FAIL
if verbose_fp:
verbose_fp.write('%s%s\n' % (pfx, str(err)))
else:
errors = [err for err in results.errors if err.severity in ('fatal', 'error')]
warnings = [err for err in results.errors if err.severity in ('warning',)]
if errors:
success = self.SUCCESS_FAIL
elif warnings:
success = self.SUCCESS_WARNING
else:
success = self.SUCCESS_OK
if reformat:
encopts = jsonopts.copy()
encopts.strictness = STRICTNESS_TOLERANT
if reformat == 'compactly':
encopts.encode_compactly = True
else:
encopts.encode_compactly = False
reformatted = encode(results.object, encoding=output_encoding, json_options=encopts)
return (success, reformatted)
| 18,567 |
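_lintcheck_data() is a thin wrapper around the module-level decode() call in its body, so the same diagnostic keywords can be used directly. A sketch restricted to arguments that appear above; the sample JSON text is made up:

# Sketch: decode() with the diagnostics keywords used by _lintcheck_data().
import sys
from akshare.utils import demjson

opts = demjson.json_options(strict=demjson.STRICTNESS_WARN)
results = demjson.decode('{"a": 1, "b": 2}',
                         return_errors=True,
                         return_stats=True,
                         write_errors=sys.stderr,
                         json_options=opts)
print(results.object, len(results.errors))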
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
jsonlint._lintcheck
|
(self, filename, output_filename,
verbose=False,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
jsonopts=None)
|
return success
| 6,021 | 6,076 |
def _lintcheck(self, filename, output_filename,
verbose=False,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
jsonopts=None):
import sys
verbose_fp = None
if not filename or filename == "-":
pfx = '<stdin>: '
jsondata = self.stdin.read()
if verbose:
verbose_fp = self.stderr
else:
pfx = '%s: ' % filename
try:
fp = open(filename, 'rb')
jsondata = fp.read()
fp.close()
except IOError as err:
self.stderr.write('%s: %s\n' % (pfx, str(err)))
return self.SUCCESS_FAIL
if verbose:
verbose_fp = self.stdout
success, reformatted = self._lintcheck_data(
jsondata,
verbose_fp=verbose_fp,
reformat=reformat,
show_stats=show_stats,
input_encoding=input_encoding, output_encoding=output_encoding,
pfx=pfx,
jsonopts=jsonopts)
if success != self.SUCCESS_FAIL and reformat:
if output_filename:
try:
fp = open(output_filename, 'wb')
fp.write(reformatted)
except IOError as err:
self.stderr.write('%s: %s\n' % (pfx, str(err)))
success = False
else:
if hasattr(sys.stdout, 'buffer'): # To write binary data rather than strings
self.stdout.buffer.write(reformatted)
else:
self.stdout.write(reformatted)
elif success == self.SUCCESS_OK and verbose_fp:
verbose_fp.write('%sok\n' % pfx)
elif success == self.SUCCESS_WARNING and verbose_fp:
verbose_fp.write('%sok, with warnings\n' % pfx)
elif verbose_fp:
verbose_fp.write("%shas errors\n" % pfx)
return success
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L6021-L6076
| 25 |
[
0
] | 1.785714 |
[
6,
7,
9,
10,
11,
12,
13,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
26,
35,
36,
37,
38,
39,
40,
41,
42,
44,
45,
47,
48,
49,
50,
51,
52,
53,
55
] | 64.285714 | false | 14.825334 | 56 | 16 | 35.714286 | 0 |
def _lintcheck(self, filename, output_filename,
verbose=False,
reformat=False,
show_stats=False,
input_encoding=None, output_encoding=None, escape_unicode=True,
jsonopts=None):
import sys
verbose_fp = None
if not filename or filename == "-":
pfx = '<stdin>: '
jsondata = self.stdin.read()
if verbose:
verbose_fp = self.stderr
else:
pfx = '%s: ' % filename
try:
fp = open(filename, 'rb')
jsondata = fp.read()
fp.close()
except IOError as err:
self.stderr.write('%s: %s\n' % (pfx, str(err)))
return self.SUCCESS_FAIL
if verbose:
verbose_fp = self.stdout
success, reformatted = self._lintcheck_data(
jsondata,
verbose_fp=verbose_fp,
reformat=reformat,
show_stats=show_stats,
input_encoding=input_encoding, output_encoding=output_encoding,
pfx=pfx,
jsonopts=jsonopts)
if success != self.SUCCESS_FAIL and reformat:
if output_filename:
try:
fp = open(output_filename, 'wb')
fp.write(reformatted)
except IOError as err:
self.stderr.write('%s: %s\n' % (pfx, str(err)))
success = False
else:
if hasattr(sys.stdout, 'buffer'): # To write binary data rather than strings
self.stdout.buffer.write(reformatted)
else:
self.stdout.write(reformatted)
elif success == self.SUCCESS_OK and verbose_fp:
verbose_fp.write('%sok\n' % pfx)
elif success == self.SUCCESS_WARNING and verbose_fp:
verbose_fp.write('%sok, with warnings\n' % pfx)
elif verbose_fp:
verbose_fp.write("%shas errors\n" % pfx)
return success
| 18,568 |
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/demjson.py
|
jsonlint.main
|
(self, argv)
|
return 0
|
The main routine for program "jsonlint".
Should be called with sys.argv[1:] as its sole argument.
Note sys.argv[0] which normally contains the program name
should not be passed to main(); instead this class itself
is initialized with sys.argv[0].
Use "--help" for usage syntax, or consult the 'usage' member.
|
The main routine for program "jsonlint".
| 6,078 | 6,319 |
def main(self, argv):
"""The main routine for program "jsonlint".
Should be called with sys.argv[1:] as its sole argument.
Note sys.argv[0] which normally contains the program name
should not be passed to main(); instead this class itself
is initialized with sys.argv[0].
Use "--help" for usage syntax, or consult the 'usage' member.
"""
import sys, os, getopt, unicodedata
recursion_limit = None
success = True
verbose = 'auto' # one of 'auto', True, or False
reformat = False
show_stats = False
output_filename = None
input_encoding = None
output_encoding = 'utf-8'
kwoptions = { # Will be used to initialize json_options
"sort_keys": SORT_SMART,
"strict": STRICTNESS_WARN,
"keep_format": True,
"decimal_context": 100,
}
try:
opts, args = getopt.getopt(argv,
'vqfFe:o:sSW',
['verbose', 'quiet',
'format', 'format-compactly',
'stats',
'output',
'strict', 'nonstrict', 'warn',
'html-safe', 'xml-safe',
'encoding=',
'input-encoding=', 'output-encoding=',
'sort=',
'recursion-limit=',
'leading-zero-radix=',
'keep-format',
'no-keep-format',
'indent=',
'indent-amount=',
'indent-limit=',
'indent-tab-width=',
'max-items-per-line=',
'allow=', 'warn=', 'forbid=', 'deny=',
'help', 'help-behaviors',
'version', 'copyright'])
except getopt.GetoptError as err:
self.stderr.write("Error: %s. Use \"%s --help\" for usage information.\n" \
% (err.msg, self.program_name))
return 1
# Set verbose before looking at any other options
for opt, val in opts:
if opt in ('-v', '--verbose'):
verbose = True
# Process all options
for opt, val in opts:
if opt in ('-h', '--help'):
self.stdout.write(self.usage)
return 0
elif opt == '--help-behaviors':
self.stdout.write("""
BEHAVIOR OPTIONS:
This set of options lets you control which checks are performed.
They may be turned on or off by listing them as arguments to one of
the options --allow, --warn, or --forbid; for example:
%(program_name)s --allow comments,hex-numbers --forbid duplicate-keys
""" % {"program_name": self.program_name})
self.stdout.write("The default shown is for %s mode\n\n" % kwoptions['strict'])
self.stdout.write('%-7s %-25s %s\n' % ("Default", "Behavior_name", "Description"))
self.stdout.write('-' * 7 + ' ' + '-' * 25 + ' ' + '-' * 50 + '\n')
j = json_options(**kwoptions)
for behavior in sorted(j.all_behaviors):
v = j.get_behavior(behavior)
desc = j.describe_behavior(behavior)
self.stdout.write('%-7s %-25s %s\n' % (v.lower(), behavior.replace('_', '-'), desc))
return 0
elif opt == '--version':
self.stdout.write('%s (%s) version %s (%s)\n' \
% (self.program_name, __name__, __version__, __date__))
if verbose == True:
self.stdout.write('demjson from %r\n' % (__file__,))
if verbose == True:
self.stdout.write('Python version: %s\n' % (sys.version.replace('\n', ' '),))
self.stdout.write('This python implementation supports:\n')
self.stdout.write(' * Max unicode: U+%X\n' % (sys.maxunicode,))
self.stdout.write(' * Unicode version: %s\n' % (unicodedata.unidata_version,))
self.stdout.write(' * Floating-point significant digits: %d\n' % (float_sigdigits,))
self.stdout.write(' * Floating-point max 10^exponent: %d\n' % (float_maxexp,))
if str(0.0) == str(-0.0):
szero = 'No'
else:
szero = 'Yes'
self.stdout.write(' * Floating-point has signed-zeros: %s\n' % (szero,))
if decimal:
has_dec = 'Yes'
else:
has_dec = 'No'
self.stdout.write(' * Decimal (bigfloat) support: %s\n' % (has_dec,))
return 0
elif opt == '--copyright':
self.stdout.write("%s is distributed as part of the \"demjson\" python package.\n" \
% (self.program_name,))
self.stdout.write("See %s\n\n\n" % (__homepage__,))
self.stdout.write(__credits__)
return 0
elif opt in ('-v', '--verbose'):
verbose = True
elif opt in ('-q', '--quiet'):
verbose = False
elif opt in ('-s', '--strict'):
kwoptions['strict'] = STRICTNESS_STRICT
kwoptions['keep_format'] = False
elif opt in ('-S', '--nonstrict'):
kwoptions['strict'] = STRICTNESS_TOLERANT
elif opt in ('-W', '--tolerant'):
kwoptions['strict'] = STRICTNESS_WARN
elif opt in ('-f', '--format'):
reformat = True
kwoptions['encode_compactly'] = False
elif opt in ('-F', '--format-compactly'):
kwoptions['encode_compactly'] = True
reformat = 'compactly'
elif opt in ('--stats',):
show_stats = True
elif opt in ('-o', '--output'):
output_filename = val
elif opt in ('-e', '--encoding'):
input_encoding = val
output_encoding = val
escape_unicode = False
            elif opt in ('--output-encoding',):  # tuple, not a bare string: 'in' on a string matches substrings
                output_encoding = val
                escape_unicode = False
            elif opt in ('--input-encoding',):
input_encoding = val
elif opt in ('--html-safe', '--xml-safe'):
kwoptions['html_safe'] = True
elif opt in ('--allow', '--warn', '--forbid'):
action = opt[2:]
if action in kwoptions:
kwoptions[action] += "," + val
else:
kwoptions[action] = val
elif opt in ('--keep-format',):
kwoptions['keep_format'] = True
elif opt in ('--no-keep-format',):
kwoptions['keep_format'] = False
elif opt == '--leading-zero-radix':
kwoptions['leading_zero_radix'] = val
elif opt in ('--indent', '--indent-amount'):
if val in ('tab', 'tabs'):
kwoptions['indent_amount'] = 8
kwoptions['indent_tab_width'] = 8
else:
try:
kwoptions['indent_amount'] = int(val)
except ValueError:
self.stderr.write("Indentation amount must be a number\n")
return 1
            elif opt == '--indent-tab-width':  # getopt yields long options with the '--' prefix
try:
kwoptions['indent_tab_width'] = int(val)
except ValueError:
self.stderr.write("Indentation tab width must be a number\n")
return 1
elif opt == '--max-items-per-line':
try:
kwoptions['max_items_per_line'] = int(val)
except ValueError:
self.stderr.write("Max items per line must be a number\n")
return 1
elif opt == '--sort':
val = val.lower()
if val == 'alpha':
kwoptions['sort_keys'] = SORT_ALPHA
elif val == 'alpha_ci':
kwoptions['sort_keys'] = SORT_ALPHA_CI
elif val == 'preserve':
kwoptions['sort_keys'] = SORT_PRESERVE
else:
kwoptions['sort_keys'] = SORT_SMART
elif opt == '--recursion-limit':
try:
recursion_limit = int(val)
except ValueError:
self.stderr.write("Recursion limit must be a number: %r\n" % val)
return 1
else:
max_limit = 100000
old_limit = sys.getrecursionlimit()
if recursion_limit > max_limit:
self.stderr.write(
"Recursion limit must be a number between %d and %d\n" % (old_limit, max_limit))
return 1
elif recursion_limit > old_limit:
sys.setrecursionlimit(recursion_limit)
else:
self.stderr.write('Unknown option %r\n' % opt)
return 1
# Make the JSON options
kwoptions['decimal_context'] = 100
jsonopts = json_options(**kwoptions)
# Now decode each file...
if not args:
args = [None]
for fn in args:
try:
rc = self._lintcheck(fn, output_filename=output_filename,
verbose=verbose,
reformat=reformat,
show_stats=show_stats,
input_encoding=input_encoding,
output_encoding=output_encoding,
jsonopts=jsonopts)
if rc != self.SUCCESS_OK:
# Warnings or errors should result in failure. If
# checking multiple files, do not change a
# previous error back to ok.
success = False
except KeyboardInterrupt as err:
sys.stderr.write("\njsonlint interrupted!\n")
sys.exit(1)
if not success:
return 1
return 0
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/demjson.py#L6078-L6319
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 4.958678 |
[
12,
14,
15,
16,
17,
18,
19,
20,
21,
23,
30,
31,
54,
55,
57,
60,
61,
62,
65,
66,
67,
68,
69,
70,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
104,
105,
106,
107,
109,
110,
111,
112,
113,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150,
151,
152,
153,
155,
156,
157,
158,
159,
160,
161,
162,
163,
164,
165,
167,
168,
169,
170,
171,
172,
173,
174,
175,
176,
177,
178,
179,
180,
181,
182,
183,
184,
185,
186,
187,
188,
189,
190,
191,
193,
194,
195,
196,
197,
198,
199,
201,
202,
203,
204,
206,
207,
208,
210,
211,
214,
215,
218,
219,
221,
222,
223,
230,
234,
235,
236,
237,
239,
240,
241
] | 66.528926 | false | 14.825334 | 242 | 52 | 33.471074 | 9 |
def main(self, argv):
import sys, os, getopt, unicodedata
recursion_limit = None
success = True
verbose = 'auto' # one of 'auto', True, or False
reformat = False
show_stats = False
output_filename = None
input_encoding = None
output_encoding = 'utf-8'
kwoptions = { # Will be used to initialize json_options
"sort_keys": SORT_SMART,
"strict": STRICTNESS_WARN,
"keep_format": True,
"decimal_context": 100,
}
try:
opts, args = getopt.getopt(argv,
'vqfFe:o:sSW',
['verbose', 'quiet',
'format', 'format-compactly',
'stats',
'output',
'strict', 'nonstrict', 'warn',
'html-safe', 'xml-safe',
'encoding=',
'input-encoding=', 'output-encoding=',
'sort=',
'recursion-limit=',
'leading-zero-radix=',
'keep-format',
'no-keep-format',
'indent=',
'indent-amount=',
'indent-limit=',
'indent-tab-width=',
'max-items-per-line=',
'allow=', 'warn=', 'forbid=', 'deny=',
'help', 'help-behaviors',
'version', 'copyright'])
except getopt.GetoptError as err:
self.stderr.write("Error: %s. Use \"%s --help\" for usage information.\n" \
% (err.msg, self.program_name))
return 1
# Set verbose before looking at any other options
for opt, val in opts:
if opt in ('-v', '--verbose'):
verbose = True
# Process all options
for opt, val in opts:
if opt in ('-h', '--help'):
self.stdout.write(self.usage)
return 0
elif opt == '--help-behaviors':
self.stdout.write("""
BEHAVIOR OPTIONS:
This set of options lets you control which checks are performed.
They may be turned on or off by listing them as arguments to one of
the options --allow, --warn, or --forbid; for example:
%(program_name)s --allow comments,hex-numbers --forbid duplicate-keys
""" % {"program_name": self.program_name})
self.stdout.write("The default shown is for %s mode\n\n" % kwoptions['strict'])
self.stdout.write('%-7s %-25s %s\n' % ("Default", "Behavior_name", "Description"))
self.stdout.write('-' * 7 + ' ' + '-' * 25 + ' ' + '-' * 50 + '\n')
j = json_options(**kwoptions)
for behavior in sorted(j.all_behaviors):
v = j.get_behavior(behavior)
desc = j.describe_behavior(behavior)
self.stdout.write('%-7s %-25s %s\n' % (v.lower(), behavior.replace('_', '-'), desc))
return 0
elif opt == '--version':
self.stdout.write('%s (%s) version %s (%s)\n' \
% (self.program_name, __name__, __version__, __date__))
if verbose == True:
self.stdout.write('demjson from %r\n' % (__file__,))
if verbose == True:
self.stdout.write('Python version: %s\n' % (sys.version.replace('\n', ' '),))
self.stdout.write('This python implementation supports:\n')
self.stdout.write(' * Max unicode: U+%X\n' % (sys.maxunicode,))
self.stdout.write(' * Unicode version: %s\n' % (unicodedata.unidata_version,))
self.stdout.write(' * Floating-point significant digits: %d\n' % (float_sigdigits,))
self.stdout.write(' * Floating-point max 10^exponent: %d\n' % (float_maxexp,))
if str(0.0) == str(-0.0):
szero = 'No'
else:
szero = 'Yes'
self.stdout.write(' * Floating-point has signed-zeros: %s\n' % (szero,))
if decimal:
has_dec = 'Yes'
else:
has_dec = 'No'
self.stdout.write(' * Decimal (bigfloat) support: %s\n' % (has_dec,))
return 0
elif opt == '--copyright':
self.stdout.write("%s is distributed as part of the \"demjson\" python package.\n" \
% (self.program_name,))
self.stdout.write("See %s\n\n\n" % (__homepage__,))
self.stdout.write(__credits__)
return 0
elif opt in ('-v', '--verbose'):
verbose = True
elif opt in ('-q', '--quiet'):
verbose = False
elif opt in ('-s', '--strict'):
kwoptions['strict'] = STRICTNESS_STRICT
kwoptions['keep_format'] = False
elif opt in ('-S', '--nonstrict'):
kwoptions['strict'] = STRICTNESS_TOLERANT
elif opt in ('-W', '--tolerant'):
kwoptions['strict'] = STRICTNESS_WARN
elif opt in ('-f', '--format'):
reformat = True
kwoptions['encode_compactly'] = False
elif opt in ('-F', '--format-compactly'):
kwoptions['encode_compactly'] = True
reformat = 'compactly'
elif opt in ('--stats',):
show_stats = True
elif opt in ('-o', '--output'):
output_filename = val
elif opt in ('-e', '--encoding'):
input_encoding = val
output_encoding = val
escape_unicode = False
        elif opt in ('--output-encoding',):  # tuple, not a bare string: 'in' on a string matches substrings
            output_encoding = val
            escape_unicode = False
        elif opt in ('--input-encoding',):
input_encoding = val
elif opt in ('--html-safe', '--xml-safe'):
kwoptions['html_safe'] = True
elif opt in ('--allow', '--warn', '--forbid'):
action = opt[2:]
if action in kwoptions:
kwoptions[action] += "," + val
else:
kwoptions[action] = val
elif opt in ('--keep-format',):
kwoptions['keep_format'] = True
elif opt in ('--no-keep-format',):
kwoptions['keep_format'] = False
elif opt == '--leading-zero-radix':
kwoptions['leading_zero_radix'] = val
elif opt in ('--indent', '--indent-amount'):
if val in ('tab', 'tabs'):
kwoptions['indent_amount'] = 8
kwoptions['indent_tab_width'] = 8
else:
try:
kwoptions['indent_amount'] = int(val)
except ValueError:
self.stderr.write("Indentation amount must be a number\n")
return 1
        elif opt == '--indent-tab-width':  # getopt yields long options with the '--' prefix
try:
kwoptions['indent_tab_width'] = int(val)
except ValueError:
self.stderr.write("Indentation tab width must be a number\n")
return 1
elif opt == '--max-items-per-line':
try:
kwoptions['max_items_per_line'] = int(val)
except ValueError:
self.stderr.write("Max items per line must be a number\n")
return 1
elif opt == '--sort':
val = val.lower()
if val == 'alpha':
kwoptions['sort_keys'] = SORT_ALPHA
elif val == 'alpha_ci':
kwoptions['sort_keys'] = SORT_ALPHA_CI
elif val == 'preserve':
kwoptions['sort_keys'] = SORT_PRESERVE
else:
kwoptions['sort_keys'] = SORT_SMART
elif opt == '--recursion-limit':
try:
recursion_limit = int(val)
except ValueError:
self.stderr.write("Recursion limit must be a number: %r\n" % val)
return 1
else:
max_limit = 100000
old_limit = sys.getrecursionlimit()
if recursion_limit > max_limit:
self.stderr.write(
"Recursion limit must be a number between %d and %d\n" % (old_limit, max_limit))
return 1
elif recursion_limit > old_limit:
sys.setrecursionlimit(recursion_limit)
else:
self.stderr.write('Unknown option %r\n' % opt)
return 1
# Make the JSON options
kwoptions['decimal_context'] = 100
jsonopts = json_options(**kwoptions)
# Now decode each file...
if not args:
args = [None]
for fn in args:
try:
rc = self._lintcheck(fn, output_filename=output_filename,
verbose=verbose,
reformat=reformat,
show_stats=show_stats,
input_encoding=input_encoding,
output_encoding=output_encoding,
jsonopts=jsonopts)
if rc != self.SUCCESS_OK:
# Warnings or errors should result in failure. If
# checking multiple files, do not change a
# previous error back to ok.
success = False
except KeyboardInterrupt as err:
sys.stderr.write("\njsonlint interrupted!\n")
sys.exit(1)
if not success:
return 1
return 0
| 18,569 |
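main() can also be driven programmatically with an argv list; the flags below are all declared in the getopt table above, while the file names are placeholders:

# Sketch: reformatting one file through the option parser above.
from akshare.utils.demjson import jsonlint

rc = jsonlint().main(['--format', '--output', 'pretty.json', 'raw.json'])
# rc mirrors the return paths above: 0 on success, 1 on warnings or errors.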
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/token_process.py
|
set_token
|
(token)
| 14 | 18 |
def set_token(token):
df = pd.DataFrame([token], columns=['token'])
user_home = os.path.expanduser('~')
fp = os.path.join(user_home, cons.TOKEN_F_P)
df.to_csv(fp, index=False)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/token_process.py#L14-L18
| 25 |
[
0
] | 20 |
[
1,
2,
3,
4
] | 80 | false | 36.842105 | 5 | 1 | 20 | 0 |
def set_token(token):
df = pd.DataFrame([token], columns=['token'])
user_home = os.path.expanduser('~')
fp = os.path.join(user_home, cons.TOKEN_F_P)
df.to_csv(fp, index=False)
| 18,570 |
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/utils/token_process.py
|
get_token
|
()
| 21 | 29 |
def get_token():
user_home = os.path.expanduser('~')
fp = os.path.join(user_home, cons.TOKEN_F_P)
if os.path.exists(fp):
df = pd.read_csv(fp)
return str(df.iloc[0]['token'])
else:
print(cons.TOKEN_ERR_MSG)
return
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/utils/token_process.py#L21-L29
| 25 |
[
0
] | 11.111111 |
[
1,
2,
3,
4,
5,
7,
8
] | 77.777778 | false | 36.842105 | 9 | 2 | 22.222222 | 0 |
def get_token():
user_home = os.path.expanduser('~')
fp = os.path.join(user_home, cons.TOKEN_F_P)
if os.path.exists(fp):
df = pd.read_csv(fp)
return str(df.iloc[0]['token'])
else:
print(cons.TOKEN_ERR_MSG)
return
| 18,571 |
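set_token() and get_token() round-trip a token through a one-row CSV under the user's home directory (the relative path comes from cons.TOKEN_F_P). A sketch with a placeholder token:

# Sketch: storing and reading back a (placeholder) API token.
from akshare.utils.token_process import get_token, set_token

set_token("your_token_here")
assert get_token() == "your_token_here"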
|||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/other/other_game.py
|
club_rank_game
|
(symbol: str = "英雄联盟") -> pd.DataFrame
|
return temp_df
|
China e-sports value rankings - club ranking
http://rank.uuu9.com/
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
:return: club ranking data
:rtype: pandas.DataFrame
|
China e-sports value rankings - club ranking
http://rank.uuu9.com/
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
:return: club ranking data
:rtype: pandas.DataFrame
| 13 | 80 |
def club_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
"""
    China e-sports value rankings - club ranking
http://rank.uuu9.com/
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
    :return: club ranking data
:rtype: pandas.DataFrame
"""
symbol_map = {
"DOTA2": "1",
"英雄联盟": "2",
"绝地求生": "3",
"王者荣耀": "4",
"穿越火线": "5",
"和平精英": "6",
}
url = "http://rank.uuu9.com/club/ranking"
params = {"gameId": symbol_map[symbol], "type": "0"}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("div", attrs={"class": "ec_data"}).text
report_date = data_text.split(":")[-1]
temp_df = pd.read_html(r.text)[0]
if symbol in {"英雄联盟", "王者荣耀", "DOTA2"}:
temp_df.columns = [
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
]
]
else:
temp_df.columns = [
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"综合指数",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/other/other_game.py#L13-L80
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 13.235294 |
[
9,
17,
18,
19,
20,
21,
22,
23,
24,
25,
35,
47,
56,
66,
67
] | 22.058824 | false | 13.461538 | 68 | 2 | 77.941176 | 6 |
def club_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
symbol_map = {
"DOTA2": "1",
"英雄联盟": "2",
"绝地求生": "3",
"王者荣耀": "4",
"穿越火线": "5",
"和平精英": "6",
}
url = "http://rank.uuu9.com/club/ranking"
params = {"gameId": symbol_map[symbol], "type": "0"}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("div", attrs={"class": "ec_data"}).text
report_date = data_text.split(":")[-1]
temp_df = pd.read_html(r.text)[0]
if symbol in {"英雄联盟", "王者荣耀", "DOTA2"}:
temp_df.columns = [
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
]
]
else:
temp_df.columns = [
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"俱乐部名称",
"人气指数",
"舆论指数",
"综合指数",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
| 18,572 |
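Usage sketch, assuming the function is re-exported at the package level as is conventional for akshare interfaces:

# Sketch: fetch the club ranking for one of the documented symbols.
import akshare as ak

df = ak.club_rank_game(symbol="英雄联盟")
print(df.head())  # 排名, 俱乐部名称, 人气指数, 舆论指数, 战绩指数, 综合指数, 排名变动, 更新时间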
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/other/other_game.py
|
player_rank_game
|
(symbol: str = "英雄联盟") -> pd.DataFrame
|
return temp_df
|
China e-sports value rankings - player ranking
http://rank.uuu9.com/player/ranking
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
:return: player ranking data
:rtype: pandas.DataFrame
|
China e-sports value rankings - player ranking
http://rank.uuu9.com/player/ranking
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
:return: player ranking data
:rtype: pandas.DataFrame
| 83 | 184 |
def player_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
"""
    China e-sports value rankings - player ranking
http://rank.uuu9.com/player/ranking
:param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
:type symbol: str
    :return: player ranking data
:rtype: pandas.DataFrame
"""
symbol_map = {
"DOTA2": "1",
"英雄联盟": "2",
"绝地求生": "3",
"王者荣耀": "4",
"穿越火线": "5",
"和平精英": "6",
}
url = "http://rank.uuu9.com/player/ranking"
params = {"gameId": symbol_map[symbol], "type": "0"}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("div", attrs={"class": "ec_data"}).text
report_date = data_text.split(":")[-1]
temp_df = pd.read_html(r.text)[0]
if symbol == "王者荣耀":
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
if symbol in {"英雄联盟", "DOTA2"}:
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"身价",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"身价",
"排名变动",
]
]
else:
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"综合指数",
"身价",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"综合指数",
"身价",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/other/other_game.py#L83-L184
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 8.823529 |
[
9,
17,
18,
19,
20,
21,
22,
23,
24,
25,
36,
48,
49,
50,
51,
63,
77,
88,
100,
101
] | 19.607843 | false | 13.461538 | 102 | 3 | 80.392157 | 6 |
def player_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
symbol_map = {
"DOTA2": "1",
"英雄联盟": "2",
"绝地求生": "3",
"王者荣耀": "4",
"穿越火线": "5",
"和平精英": "6",
}
url = "http://rank.uuu9.com/player/ranking"
params = {"gameId": symbol_map[symbol], "type": "0"}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find("div", attrs={"class": "ec_data"}).text
report_date = data_text.split(":")[-1]
temp_df = pd.read_html(r.text)[0]
if symbol == "王者荣耀":
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
if symbol in {"英雄联盟", "DOTA2"}:
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"身价",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"战绩指数",
"综合指数",
"身价",
"排名变动",
]
]
else:
temp_df.columns = [
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"综合指数",
"身价",
"排名变动",
"-",
]
temp_df = temp_df[
[
"排名",
"选手ID",
"所属战队",
"人气指数",
"舆论指数",
"综合指数",
"身价",
"排名变动",
]
]
temp_df['更新时间'] = report_date
return temp_df
| 18,573 |
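The same call pattern applies here; note from the code above that the 王者荣耀 branch returns early and omits the 身价 column. A sketch under the same package-level export assumption:

# Sketch: the 王者荣耀 player ranking, which carries no 身价 column.
import akshare as ak

df = ak.player_rank_game(symbol="王者荣耀")
print(df.columns.tolist())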
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/other/other_car.py
|
car_cpca_energy_sale
|
()
|
return big_df
|
CPCA (China Passenger Car Association) - new energy vehicle segment markets - overall market
http://data.cpcaauto.com/FuelMarket
:return: overall new energy vehicle market data
:rtype: pandas.DataFrame
|
CPCA (China Passenger Car Association) - new energy vehicle segment markets - overall market
http://data.cpcaauto.com/FuelMarket
:return: overall new energy vehicle market data
:rtype: pandas.DataFrame
| 16 | 49 |
def car_cpca_energy_sale():
"""
    CPCA (China Passenger Car Association) - new energy vehicle segment markets - overall market
http://data.cpcaauto.com/FuelMarket
    :return: overall new energy vehicle market data
:rtype: pandas.DataFrame
"""
url = 'http://cpcadata.chinaautomarket.com:8081/chartlist'
params = {
'charttype': '6'
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json[0]['dataList'])
temp_current_year_list = []
temp_previous_year_list = []
for item in data_json[0]['dataList']:
temp_previous_year_list.append(item[temp_df.columns[2]])
try:
temp_current_year_list.append(item[temp_df.columns[1]])
        except KeyError:  # current-year value absent for months not yet published
continue
temp_current_year_df = pd.DataFrame(temp_current_year_list)
temp_previous_year_df = pd.DataFrame(temp_previous_year_list)
big_df = pd.DataFrame([temp_current_year_df.iloc[:, 2], temp_previous_year_df.iloc[:, 2]]).T
big_df.columns = [temp_df.columns[1], temp_df.columns[2]]
big_df["月份"] = temp_df["month"]
big_df = big_df[[
"月份",
temp_df.columns[2],
temp_df.columns[1],
]]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/other/other_car.py#L16-L49
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 20.588235 |
[
7,
8,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
33
] | 58.823529 | false | 15.909091 | 34 | 3 | 41.176471 | 4 |
def car_cpca_energy_sale():
url = 'http://cpcadata.chinaautomarket.com:8081/chartlist'
params = {
'charttype': '6'
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json[0]['dataList'])
temp_current_year_list = []
temp_previous_year_list = []
for item in data_json[0]['dataList']:
temp_previous_year_list.append(item[temp_df.columns[2]])
try:
temp_current_year_list.append(item[temp_df.columns[1]])
        except KeyError:  # current-year value absent for months not yet published
continue
temp_current_year_df = pd.DataFrame(temp_current_year_list)
temp_previous_year_df = pd.DataFrame(temp_previous_year_list)
big_df = pd.DataFrame([temp_current_year_df.iloc[:, 2], temp_previous_year_df.iloc[:, 2]]).T
big_df.columns = [temp_df.columns[1], temp_df.columns[2]]
big_df["月份"] = temp_df["month"]
big_df = big_df[[
"月份",
temp_df.columns[2],
temp_df.columns[1],
]]
return big_df
| 18,574 |
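Usage sketch under the same package-level export assumption; the function takes no arguments and returns monthly totals for the current and previous year:

# Sketch: CPCA overall new energy vehicle market, current vs. previous year.
import akshare as ak

df = ak.car_cpca_energy_sale()
print(df)  # 月份 plus one column per year, as assembled above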
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/other/other_car.py
|
car_gasgoo_sale_rank
|
(symbol: str = "车企榜", date: str = "202109")
|
return temp_df
|
Gasgoo - automotive manufacturer database - sales data
http://i.gasgoo.com/data/ranking
:param symbol: choice of {"车企榜", "品牌榜", "车型榜"}
:type symbol: str
:param date: year and month to query, formatted like "202109"
:type date: str
:return: sales data
:rtype: pandas.DataFrame
|
Gasgoo - automotive manufacturer database - sales data
http://i.gasgoo.com/data/ranking
:param symbol: choice of {"车企榜", "品牌榜", "车型榜"}
:type symbol: str
:param date: year and month to query, formatted like "202109"
:type date: str
:return: sales data
:rtype: pandas.DataFrame
| 52 | 107 |
def car_gasgoo_sale_rank(symbol: str = "车企榜", date: str = "202109"):
"""
    Gasgoo - automotive manufacturer database - sales data
http://i.gasgoo.com/data/ranking
:param symbol: choice of {"车企榜", "品牌榜", "车型榜"}
:type symbol: str
    :param date: year and month to query, formatted like "202109"
    :type date: str
    :return: sales data
:rtype: pandas.DataFrame
"""
symbol_map = {
"车型榜": "M",
"车企榜": "F",
"品牌榜": "B",
}
url = "https://i.gasgoo.com/data/sales/AutoModelSalesRank.aspx/GetSalesRank"
payload = {
"rankType": symbol_map[symbol],
"startY": date[:4],
"startM": str(int(date[4:6])),
"endY": date[:4],
"endM": str(int(date[4:6])),
"orderBy": f"{date[:4]}-{str(int(date[4:6]))}",
"modelGradeID": "",
"modelTypeID": "",
"countryID": "",
"queryDate": f"{date[:4]}-{str(int(date[4:6]))}",
}
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Length': '195',
'Content-Type': 'application/json; charset=UTF-8',
'Cookie': 'Hm_lvt_8e90480b1bf68ede548c407057660718=1636981448; _ga=GA1.2.858318653.1636981449; _gid=GA1.2.1448165285.1636981449; _gat=1; Hm_lpvt_8e90480b1bf68ede548c407057660718=1636982578',
'Host': 'i.gasgoo.com',
'Origin': 'https://i.gasgoo.com',
'Pragma': 'no-cache',
'Referer': 'https://i.gasgoo.com/data/sales/AutoModelSalesRank.aspx/GetSalesRank',
'sec-ch-ua': '"Google Chrome";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
r = requests.post(url, json=payload, headers=headers)
data_json = r.json()
data_json = demjson.decode(data_json['d'])
temp_df = pd.DataFrame(data_json)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/other/other_car.py#L52-L107
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 19.642857 |
[
11,
16,
17,
29,
51,
52,
53,
54,
55
] | 16.071429 | false | 15.909091 | 56 | 1 | 83.928571 | 8 |
def car_gasgoo_sale_rank(symbol: str = "车企榜", date: str = "202109"):
symbol_map = {
"车型榜": "M",
"车企榜": "F",
"品牌榜": "B",
}
url = "https://i.gasgoo.com/data/sales/AutoModelSalesRank.aspx/GetSalesRank"
payload = {
"rankType": symbol_map[symbol],
"startY": date[:4],
"startM": str(int(date[4:6])),
"endY": date[:4],
"endM": str(int(date[4:6])),
"orderBy": f"{date[:4]}-{str(int(date[4:6]))}",
"modelGradeID": "",
"modelTypeID": "",
"countryID": "",
"queryDate": f"{date[:4]}-{str(int(date[4:6]))}",
}
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Length': '195',
'Content-Type': 'application/json; charset=UTF-8',
'Cookie': 'Hm_lvt_8e90480b1bf68ede548c407057660718=1636981448; _ga=GA1.2.858318653.1636981449; _gid=GA1.2.1448165285.1636981449; _gat=1; Hm_lpvt_8e90480b1bf68ede548c407057660718=1636982578',
'Host': 'i.gasgoo.com',
'Origin': 'https://i.gasgoo.com',
'Pragma': 'no-cache',
'Referer': 'https://i.gasgoo.com/data/sales/AutoModelSalesRank.aspx/GetSalesRank',
'sec-ch-ua': '"Google Chrome";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
r = requests.post(url, json=payload, headers=headers)
data_json = r.json()
data_json = demjson.decode(data_json['d'])
temp_df = pd.DataFrame(data_json)
return temp_df
| 18,575 |
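Usage sketch under the same package-level export assumption, with the defaults shown in the signature:

# Sketch: Gasgoo manufacturer sales ranking for a given month.
import akshare as ak

df = ak.car_gasgoo_sale_rank(symbol="车企榜", date="202109")
print(df.head())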
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/rate/repo_rate.py
|
repo_rate_hist
|
(start_date: str = "20200930", end_date: str = "20201029")
|
return temp_df
|
China Foreign Exchange Trade System & National Interbank Funding Center - repo fixing rates - historical data
http://www.chinamoney.com.cn/chinese/bkfrr/
:param start_date: start date; start and end dates must fall within one month of each other
:type start_date: str
:param end_date: end date; start and end dates must fall within one month of each other
:type end_date: str
:return: historical repo fixing rates
:rtype: pandas.DataFrame
|
China Foreign Exchange Trade System & National Interbank Funding Center - repo fixing rates - historical data
http://www.chinamoney.com.cn/chinese/bkfrr/
:param start_date: start date; start and end dates must fall within one month of each other
:type start_date: str
:param end_date: end date; start and end dates must fall within one month of each other
:type end_date: str
:return: historical repo fixing rates
:rtype: pandas.DataFrame
| 11 | 51 |
def repo_rate_hist(start_date: str = "20200930", end_date: str = "20201029") -> pd.DataFrame:
"""
    China Foreign Exchange Trade System & National Interbank Funding Center - repo fixing rates - historical data
    http://www.chinamoney.com.cn/chinese/bkfrr/
    :param start_date: start date; start and end dates must fall within one month of each other
    :type start_date: str
    :param end_date: end date; start and end dates must fall within one month of each other
    :type end_date: str
    :return: historical repo fixing rates
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-currency/FrrHis"
params = {
"lang": "CN",
"startDate": start_date,
"endDate": end_date,
"pageSize": "5000",
}
r = requests.post(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df = pd.DataFrame([item for item in temp_df["frValueMap"].to_list()])
temp_df = temp_df[[
"date",
"FR001",
"FR007",
"FR014",
"FDR001",
"FDR007",
"FDR014",
]]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['FR001'] = pd.to_numeric(temp_df['FR001'])
temp_df['FR007'] = pd.to_numeric(temp_df['FR007'])
temp_df['FR014'] = pd.to_numeric(temp_df['FR014'])
temp_df['FDR001'] = pd.to_numeric(temp_df['FDR001'])
temp_df['FDR007'] = pd.to_numeric(temp_df['FDR007'])
temp_df['FDR014'] = pd.to_numeric(temp_df['FDR014'])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/rate/repo_rate.py#L11-L51
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 26.829268 |
[
11,
12,
13,
14,
20,
21,
22,
23,
24,
33,
34,
35,
36,
37,
38,
39,
40
] | 41.463415 | false | 20.833333 | 41 | 2 | 58.536585 | 8 |
def repo_rate_hist(start_date: str = "20200930", end_date: str = "20201029") -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-currency/FrrHis"
params = {
"lang": "CN",
"startDate": start_date,
"endDate": end_date,
"pageSize": "5000",
}
r = requests.post(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df = pd.DataFrame([item for item in temp_df["frValueMap"].to_list()])
temp_df = temp_df[[
"date",
"FR001",
"FR007",
"FR014",
"FDR001",
"FDR007",
"FDR014",
]]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['FR001'] = pd.to_numeric(temp_df['FR001'])
temp_df['FR007'] = pd.to_numeric(temp_df['FR007'])
temp_df['FR014'] = pd.to_numeric(temp_df['FR014'])
temp_df['FDR001'] = pd.to_numeric(temp_df['FDR001'])
temp_df['FDR007'] = pd.to_numeric(temp_df['FDR007'])
temp_df['FDR014'] = pd.to_numeric(temp_df['FDR014'])
return temp_df
| 18,576 |
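Usage sketch under the same package-level export assumption; remember the one-month constraint on the date range noted in the docstring:

# Sketch: repo fixing rates for a window no longer than one month.
import akshare as ak

df = ak.repo_rate_hist(start_date="20200930", end_date="20201029")
print(df[["date", "FR007", "FDR007"]].tail())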
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_hurun.py
|
hurun_rank
|
(indicator: str = "胡润百富榜", year: str = "2018") -> pd.DataFrame
|
return temp_df
|
Hurun rankings
https://www.hurun.net/CN/HuList/Index?num=3YwKs889SRIm
:param indicator: choice of {"胡润百富榜", "胡润全球富豪榜", "胡润印度榜", "胡润全球独角兽榜", "中国瞪羚企业榜", "全球瞪羚企业榜", "胡润Under30s创业领袖榜", "胡润中国500强民营企业", "胡润世界500强", "胡润艺术榜"}
:type indicator: str
:param year: year to query; coverage per ranking: {"胡润百富榜": "2014-present", "胡润全球富豪榜": "2019-present", "胡润印度榜": "2018-present", "胡润全球独角兽榜": "2019-present", "中国瞪羚企业榜": "2021-present", "全球瞪羚企业榜": "2021-present", "胡润Under30s创业领袖榜": "2019-present", "胡润中国500强民营企业": "2019-present", "胡润世界500强": "2020-present", "胡润艺术榜": "2019-present"}
:type year: str
:return: data for the given indicator and year
:rtype: pandas.DataFrame
|
Hurun rankings
https://www.hurun.net/CN/HuList/Index?num=3YwKs889SRIm
:param indicator: choice of {"胡润百富榜", "胡润全球富豪榜", "胡润印度榜", "胡润全球独角兽榜", "中国瞪羚企业榜", "全球瞪羚企业榜", "胡润Under30s创业领袖榜", "胡润中国500强民营企业", "胡润世界500强", "胡润艺术榜"}
:type indicator: str
:param year: year to query; coverage per ranking: {"胡润百富榜": "2014-present", "胡润全球富豪榜": "2019-present", "胡润印度榜": "2018-present", "胡润全球独角兽榜": "2019-present", "中国瞪羚企业榜": "2021-present", "全球瞪羚企业榜": "2021-present", "胡润Under30s创业领袖榜": "2019-present", "胡润中国500强民营企业": "2019-present", "胡润世界500强": "2020-present", "胡润艺术榜": "2019-present"}
:type year: str
:return: data for the given indicator and year
:rtype: pandas.DataFrame
| 14 | 310 |
def hurun_rank(indicator: str = "胡润百富榜", year: str = "2018") -> pd.DataFrame:
"""
    Hurun rankings
    https://www.hurun.net/CN/HuList/Index?num=3YwKs889SRIm
    :param indicator: choice of {"胡润百富榜", "胡润全球富豪榜", "胡润印度榜", "胡润全球独角兽榜", "中国瞪羚企业榜", "全球瞪羚企业榜", "胡润Under30s创业领袖榜", "胡润中国500强民营企业", "胡润世界500强", "胡润艺术榜"}
    :type indicator: str
    :param year: year to query; coverage per ranking: {"胡润百富榜": "2014-present", "胡润全球富豪榜": "2019-present", "胡润印度榜": "2018-present", "胡润全球独角兽榜": "2019-present", "中国瞪羚企业榜": "2021-present", "全球瞪羚企业榜": "2021-present", "胡润Under30s创业领袖榜": "2019-present", "胡润中国500强民营企业": "2019-present", "胡润世界500强": "2020-present", "胡润艺术榜": "2019-present"}
    :type year: str
    :return: data for the given indicator and year
:rtype: pandas.DataFrame
"""
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetails?pagetype=rich"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
url_list = []
for item in soup.find_all("div", attrs={"aria-labelledby": "dropdownMenuLink1"}):
for inner_item in item.find_all("a"):
url_list.append("https://www.hurun.net" + inner_item["href"])
name_list = []
for item in soup.find_all("div", attrs={"aria-labelledby": "dropdownMenuLink1"}):
for inner_item in item.find_all("a"):
name_list.append(inner_item.text)
name_url_map = dict(zip(name_list, url_list))
r = requests.get(name_url_map[indicator])
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item["value"].split("=")[2]
for item in soup.find(attrs={"id": "exampleFormControlSelect1"}).find_all(
"option"
)
]
year_list = [
item.text.split(" ")[0]
for item in soup.find(attrs={"id": "exampleFormControlSelect1"}).find_all(
"option"
)
]
year_code_map = dict(zip(year_list, code_list))
params = {
"num": year_code_map[year],
"search": "",
"offset": "0",
"limit": "20000",
}
if year == "2018":
warnings.warn("正在下载中")
offset = 0
limit = 20
big_df = pd.DataFrame()
while offset < 2200:
try:
params.update(
{
"offset": offset,
"limit": limit,
}
)
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetailsList"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["rows"])
offset = offset + 20
big_df = pd.concat([big_df, temp_df], ignore_index=True)
except requests.exceptions.JSONDecodeError as e:
offset = offset + 40
continue
big_df.rename(
columns={
"hs_Rank_Rich_Ranking": "排名",
"hs_Rank_Rich_Wealth": "财富",
"hs_Rank_Rich_Ranking_Change": "排名变化",
"hs_Rank_Rich_ChaName_Cn": "姓名",
"hs_Rank_Rich_ComName_Cn": "企业",
"hs_Rank_Rich_Industry_Cn": "行业",
},
inplace=True,
)
big_df = big_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
return big_df
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetailsList"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["rows"])
if indicator == "胡润百富榜":
temp_df.rename(
columns={
"hs_Rank_Rich_Ranking": "排名",
"hs_Rank_Rich_Wealth": "财富",
"hs_Rank_Rich_Ranking_Change": "排名变化",
"hs_Rank_Rich_ChaName_Cn": "姓名",
"hs_Rank_Rich_ComName_Cn": "企业",
"hs_Rank_Rich_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润全球富豪榜":
temp_df.rename(
columns={
"hs_Rank_Global_Ranking": "排名",
"hs_Rank_Global_Wealth": "财富",
"hs_Rank_Global_Ranking_Change": "排名变化",
"hs_Rank_Global_ChaName_Cn": "姓名",
"hs_Rank_Global_ComName_Cn": "企业",
"hs_Rank_Global_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润印度榜":
temp_df.rename(
columns={
"hs_Rank_India_Ranking": "排名",
"hs_Rank_India_Wealth": "财富",
"hs_Rank_India_Ranking_Change": "排名变化",
"hs_Rank_India_ChaName_Cn": "姓名",
"hs_Rank_India_ComName_Cn": "企业",
"hs_Rank_India_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润全球独角兽榜":
temp_df.rename(
columns={
"hs_Rank_Unicorn_Ranking": "排名",
"hs_Rank_Unicorn_Wealth": "财富",
"hs_Rank_Unicorn_Ranking_Change": "排名变化",
"hs_Rank_Unicorn_ChaName_Cn": "姓名",
"hs_Rank_Unicorn_ComName_Cn": "企业",
"hs_Rank_Unicorn_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "中国瞪羚企业榜":
temp_df.rename(
columns={
"hs_Rank_CGazelles_ComHeadquarters_Cn": "企业总部",
"hs_Rank_CGazelles_Name_Cn": "掌门人/联合创始人",
"hs_Rank_CGazelles_ComName_Cn": "企业信息",
"hs_Rank_CGazelles_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"企业信息",
"掌门人/联合创始人",
"企业总部",
"行业",
]
]
elif indicator == "全球瞪羚企业榜":
temp_df.rename(
columns={
"hs_Rank_GGazelles_ComHeadquarters_Cn": "企业总部",
"hs_Rank_GGazelles_Name_Cn": "掌门人/联合创始人",
"hs_Rank_GGazelles_ComName_Cn": "企业信息",
"hs_Rank_GGazelles_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"企业信息",
"掌门人/联合创始人",
"企业总部",
"行业",
]
]
elif indicator == "胡润Under30s创业领袖榜":
temp_df.rename(
columns={
"hs_Rank_U30_ComHeadquarters_Cn": "企业总部",
"hs_Rank_U30_ChaName_Cn": "姓名",
"hs_Rank_U30_ComName_Cn": "企业信息",
"hs_Rank_U30_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"姓名",
"企业信息",
"企业总部",
"行业",
]
]
elif indicator == "胡润中国500强民营企业":
temp_df.rename(
columns={
"hs_Rank_CTop500_Ranking": "排名",
"hs_Rank_CTop500_Wealth": "企业估值",
"hs_Rank_CTop500_Ranking_Change": "排名变化",
"hs_Rank_CTop500_ChaName_Cn": "CEO",
"hs_Rank_CTop500_ComName_Cn": "企业信息",
"hs_Rank_CTop500_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"企业估值",
"企业信息",
"CEO",
"行业",
]
]
elif indicator == "胡润世界500强":
temp_df.rename(
columns={
"hs_Rank_GTop500_Ranking": "排名",
"hs_Rank_GTop500_Wealth": "企业估值",
"hs_Rank_GTop500_Ranking_Change": "排名变化",
"hs_Rank_GTop500_ChaName_Cn": "CEO",
"hs_Rank_GTop500_ComName_Cn": "企业信息",
"hs_Rank_GTop500_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"企业估值",
"企业信息",
"CEO",
"行业",
]
]
elif indicator == "胡润艺术榜":
temp_df.rename(
columns={
"hs_Rank_Art_Ranking": "排名",
"hs_Rank_Art_Turnover": "成交额",
"hs_Rank_Art_Ranking_Change": "排名变化",
"hs_Rank_Art_Name_Cn": "姓名",
"hs_Rank_Art_Age": "年龄",
"hs_Rank_Art_ArtCategory_Cn": "艺术类别",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"成交额",
"姓名",
"年龄",
"艺术类别",
]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_hurun.py#L14-L310
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 3.703704 |
[
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
23,
24,
25,
26,
32,
38,
39,
45,
46,
47,
48,
49,
50,
51,
52,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
78,
87,
88,
89,
90,
91,
92,
93,
104,
113,
114,
125,
134,
135,
146,
155,
156,
167,
176,
177,
186,
194,
195,
204,
212,
213,
222,
230,
231,
242,
252,
253,
264,
274,
275,
286,
296
] | 24.579125 | false | 7 | 297 | 20 | 75.420875 | 8 |
def hurun_rank(indicator: str = "胡润百富榜", year: str = "2018") -> pd.DataFrame:
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetails?pagetype=rich"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
url_list = []
for item in soup.find_all("div", attrs={"aria-labelledby": "dropdownMenuLink1"}):
for inner_item in item.find_all("a"):
url_list.append("https://www.hurun.net" + inner_item["href"])
name_list = []
for item in soup.find_all("div", attrs={"aria-labelledby": "dropdownMenuLink1"}):
for inner_item in item.find_all("a"):
name_list.append(inner_item.text)
name_url_map = dict(zip(name_list, url_list))
r = requests.get(name_url_map[indicator])
soup = BeautifulSoup(r.text, "lxml")
code_list = [
item["value"].split("=")[2]
for item in soup.find(attrs={"id": "exampleFormControlSelect1"}).find_all(
"option"
)
]
year_list = [
item.text.split(" ")[0]
for item in soup.find(attrs={"id": "exampleFormControlSelect1"}).find_all(
"option"
)
]
year_code_map = dict(zip(year_list, code_list))
params = {
"num": year_code_map[year],
"search": "",
"offset": "0",
"limit": "20000",
}
if year == "2018":
warnings.warn("正在下载中")
offset = 0
limit = 20
big_df = pd.DataFrame()
while offset < 2200:
try:
params.update(
{
"offset": offset,
"limit": limit,
}
)
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetailsList"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["rows"])
offset = offset + 20
big_df = pd.concat([big_df, temp_df], ignore_index=True)
        except requests.exceptions.JSONDecodeError:
offset = offset + 40
continue
big_df.rename(
columns={
"hs_Rank_Rich_Ranking": "排名",
"hs_Rank_Rich_Wealth": "财富",
"hs_Rank_Rich_Ranking_Change": "排名变化",
"hs_Rank_Rich_ChaName_Cn": "姓名",
"hs_Rank_Rich_ComName_Cn": "企业",
"hs_Rank_Rich_Industry_Cn": "行业",
},
inplace=True,
)
big_df = big_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
return big_df
url = "https://www.hurun.net/zh-CN/Rank/HsRankDetailsList"
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["rows"])
if indicator == "胡润百富榜":
temp_df.rename(
columns={
"hs_Rank_Rich_Ranking": "排名",
"hs_Rank_Rich_Wealth": "财富",
"hs_Rank_Rich_Ranking_Change": "排名变化",
"hs_Rank_Rich_ChaName_Cn": "姓名",
"hs_Rank_Rich_ComName_Cn": "企业",
"hs_Rank_Rich_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润全球富豪榜":
temp_df.rename(
columns={
"hs_Rank_Global_Ranking": "排名",
"hs_Rank_Global_Wealth": "财富",
"hs_Rank_Global_Ranking_Change": "排名变化",
"hs_Rank_Global_ChaName_Cn": "姓名",
"hs_Rank_Global_ComName_Cn": "企业",
"hs_Rank_Global_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润印度榜":
temp_df.rename(
columns={
"hs_Rank_India_Ranking": "排名",
"hs_Rank_India_Wealth": "财富",
"hs_Rank_India_Ranking_Change": "排名变化",
"hs_Rank_India_ChaName_Cn": "姓名",
"hs_Rank_India_ComName_Cn": "企业",
"hs_Rank_India_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "胡润全球独角兽榜":
temp_df.rename(
columns={
"hs_Rank_Unicorn_Ranking": "排名",
"hs_Rank_Unicorn_Wealth": "财富",
"hs_Rank_Unicorn_Ranking_Change": "排名变化",
"hs_Rank_Unicorn_ChaName_Cn": "姓名",
"hs_Rank_Unicorn_ComName_Cn": "企业",
"hs_Rank_Unicorn_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"企业",
"行业",
]
]
elif indicator == "中国瞪羚企业榜":
temp_df.rename(
columns={
"hs_Rank_CGazelles_ComHeadquarters_Cn": "企业总部",
"hs_Rank_CGazelles_Name_Cn": "掌门人/联合创始人",
"hs_Rank_CGazelles_ComName_Cn": "企业信息",
"hs_Rank_CGazelles_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"企业信息",
"掌门人/联合创始人",
"企业总部",
"行业",
]
]
elif indicator == "全球瞪羚企业榜":
temp_df.rename(
columns={
"hs_Rank_GGazelles_ComHeadquarters_Cn": "企业总部",
"hs_Rank_GGazelles_Name_Cn": "掌门人/联合创始人",
"hs_Rank_GGazelles_ComName_Cn": "企业信息",
"hs_Rank_GGazelles_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"企业信息",
"掌门人/联合创始人",
"企业总部",
"行业",
]
]
elif indicator == "胡润Under30s创业领袖榜":
temp_df.rename(
columns={
"hs_Rank_U30_ComHeadquarters_Cn": "企业总部",
"hs_Rank_U30_ChaName_Cn": "姓名",
"hs_Rank_U30_ComName_Cn": "企业信息",
"hs_Rank_U30_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"姓名",
"企业信息",
"企业总部",
"行业",
]
]
elif indicator == "胡润中国500强民营企业":
temp_df.rename(
columns={
"hs_Rank_CTop500_Ranking": "排名",
"hs_Rank_CTop500_Wealth": "企业估值",
"hs_Rank_CTop500_Ranking_Change": "排名变化",
"hs_Rank_CTop500_ChaName_Cn": "CEO",
"hs_Rank_CTop500_ComName_Cn": "企业信息",
"hs_Rank_CTop500_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"企业估值",
"企业信息",
"CEO",
"行业",
]
]
elif indicator == "胡润世界500强":
temp_df.rename(
columns={
"hs_Rank_GTop500_Ranking": "排名",
"hs_Rank_GTop500_Wealth": "企业估值",
"hs_Rank_GTop500_Ranking_Change": "排名变化",
"hs_Rank_GTop500_ChaName_Cn": "CEO",
"hs_Rank_GTop500_ComName_Cn": "企业信息",
"hs_Rank_GTop500_Industry_Cn": "行业",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"企业估值",
"企业信息",
"CEO",
"行业",
]
]
elif indicator == "胡润艺术榜":
temp_df.rename(
columns={
"hs_Rank_Art_Ranking": "排名",
"hs_Rank_Art_Turnover": "成交额",
"hs_Rank_Art_Ranking_Change": "排名变化",
"hs_Rank_Art_Name_Cn": "姓名",
"hs_Rank_Art_Age": "年龄",
"hs_Rank_Art_ArtCategory_Cn": "艺术类别",
},
inplace=True,
)
temp_df = temp_df[
[
"排名",
"排名变化",
"成交额",
"姓名",
"年龄",
"艺术类别",
]
]
return temp_df
| 18,577 |
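A minimal usage sketch for hurun_rank above, assuming the function is re-exported at the akshare top level (the library's convention) and that hurun.net is reachable; output columns follow the rename maps in the code:

import akshare as ak

# 默认 indicator="胡润百富榜"; year="2018" 走分页下载分支, 其余年份单次请求
df = ak.hurun_rank(indicator="胡润百富榜", year="2019")
print(df.head())  # 排名, 财富, 姓名, 企业, 行业

Note that year="2018" takes the paged-download branch and returns early with the 百富榜 column names, ignoring the indicator-specific renames.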
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_forbes_500.py
|
forbes_rank
|
(symbol: str = "2021福布斯中国创投人100") -> pd.DataFrame
|
return temp_df
|
福布斯中国-榜单
https://www.forbeschina.com/lists
https://www.forbeschina.com/lists/1750
:param symbol: choice of {"2020福布斯美国富豪榜", "2020福布斯新加坡富豪榜", "2020福布斯中国名人榜", *}
:type symbol: str
:return: 具体指标的榜单
:rtype: pandas.DataFrame
|
福布斯中国-榜单
https://www.forbeschina.com/lists
https://www.forbeschina.com/lists/1750
:param symbol: choice of {"2020福布斯美国富豪榜", "2020福布斯新加坡富豪榜", "2020福布斯中国名人榜", *}
:type symbol: str
:return: 具体指标的榜单
:rtype: pandas.DataFrame
| 13 | 44 |
def forbes_rank(symbol: str = "2021福布斯中国创投人100") -> pd.DataFrame:
"""
福布斯中国-榜单
https://www.forbeschina.com/lists
https://www.forbeschina.com/lists/1750
:param symbol: choice of {"2020福布斯美国富豪榜", "2020福布斯新加坡富豪榜", "2020福布斯中国名人榜", *}
:type symbol: str
:return: 具体指标的榜单
:rtype: pandas.DataFrame
"""
url = "https://www.forbeschina.com/lists"
r = requests.get(url, verify=False)
soup = BeautifulSoup(r.text, "lxml")
need_list = [
item.find_all("a")
for item in soup.find_all("div", attrs={"class": "col-sm-4"})
]
all_list = []
for item in need_list:
all_list.extend(item)
name_url_dict = dict(
zip(
[item.text.strip() for item in all_list],
[
"https://www.forbeschina.com" + item["href"]
for item in all_list
],
)
)
r = requests.get(name_url_dict[symbol], verify=False)
temp_df = pd.read_html(r.text)[0]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_forbes_500.py#L13-L44
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 31.25 |
[
10,
11,
12,
13,
17,
18,
19,
20,
29,
30,
31
] | 34.375 | false | 31.578947 | 32 | 5 | 65.625 | 7 |
def forbes_rank(symbol: str = "2021福布斯中国创投人100") -> pd.DataFrame:
url = "https://www.forbeschina.com/lists"
r = requests.get(url, verify=False)
soup = BeautifulSoup(r.text, "lxml")
need_list = [
item.find_all("a")
for item in soup.find_all("div", attrs={"class": "col-sm-4"})
]
all_list = []
for item in need_list:
all_list.extend(item)
name_url_dict = dict(
zip(
[item.text.strip() for item in all_list],
[
"https://www.forbeschina.com" + item["href"]
for item in all_list
],
)
)
r = requests.get(name_url_dict[symbol], verify=False)
temp_df = pd.read_html(r.text)[0]
return temp_df
| 18,578 |
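A minimal usage sketch for forbes_rank above, assuming a top-level export per akshare convention; symbol must match one of the list names scraped from forbeschina.com at call time, and verify=False means TLS certificates are not checked:

import akshare as ak

df = ak.forbes_rank(symbol="2020福布斯中国名人榜")
print(df.head())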
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_bloomberg.py
|
index_bloomberg_billionaires_hist
|
(year: str = "2021")
|
return temp_df
|
Bloomberg Billionaires Index
https://stats.areppim.com/stats/links_billionairexlists.htm
:param year: choice of {"2021", "2019", "2018", ...}
:type year: str
:return: 彭博亿万富豪指数历史数据
:rtype: pandas.DataFrame
|
Bloomberg Billionaires Index
https://stats.areppim.com/stats/links_billionairexlists.htm
:param year: choice of {"2021", "2019", "2018", ...}
:type year: str
:return: 彭博亿万富豪指数历史数据
:rtype: pandas.DataFrame
| 13 | 61 |
def index_bloomberg_billionaires_hist(year: str = "2021") -> pd.DataFrame:
"""
Bloomberg Billionaires Index
https://stats.areppim.com/stats/links_billionairexlists.htm
:param year: choice of {"2021", "2019", "2018", ...}
:type year: str
:return: 彭博亿万富豪指数历史数据
:rtype: pandas.DataFrame
"""
url = f"https://stats.areppim.com/listes/list_billionairesx{year[-2:]}xwor.htm"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
trs = soup.findAll("table")[0].findAll("tr")
heads = trs[1]
if "Rank" not in heads.text:
heads = trs[0]
dic_keys = []
dic = {}
for head in heads:
head = head.text
dic_keys.append(head)
for dic_key in dic_keys:
dic[dic_key] = []
    for tr in trs:
        item = tr.findAll("td")
for i in range(len(item)):
v = item[i].text
if i == 0 and not v.isdigit():
break
dic[dic_keys[i]].append(v)
temp_df = pd.DataFrame(dic)
temp_df = temp_df.rename(
{
"Rank": "rank",
"Name": "name",
"Age": "age",
"Citizenship": "country",
"Country": "country",
"Net Worth(bil US$)": "total_net_worth",
"Total net worth$Billion": "total_net_worth",
"$ Last change": "last_change",
"$ YTD change": "ytd_change",
"Industry": "industry",
},
axis=1,
)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_bloomberg.py#L13-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18.367347 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
24,
25,
26,
27,
28,
29,
30,
32,
33,
48
] | 48.979592 | false | 14.583333 | 49 | 8 | 51.020408 | 6 |
def index_bloomberg_billionaires_hist(year: str = "2021") -> pd.DataFrame:
url = f"https://stats.areppim.com/listes/list_billionairesx{year[-2:]}xwor.htm"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
trs = soup.findAll("table")[0].findAll("tr")
heads = trs[1]
if "Rank" not in heads.text:
heads = trs[0]
dic_keys = []
dic = {}
for head in heads:
head = head.text
dic_keys.append(head)
for dic_key in dic_keys:
dic[dic_key] = []
    for tr in trs:
        item = tr.findAll("td")
for i in range(len(item)):
v = item[i].text
if i == 0 and not v.isdigit():
break
dic[dic_keys[i]].append(v)
temp_df = pd.DataFrame(dic)
temp_df = temp_df.rename(
{
"Rank": "rank",
"Name": "name",
"Age": "age",
"Citizenship": "country",
"Country": "country",
"Net Worth(bil US$)": "total_net_worth",
"Total net worth$Billion": "total_net_worth",
"$ Last change": "last_change",
"$ YTD change": "ytd_change",
"Industry": "industry",
},
axis=1,
)
return temp_df
| 18,579 |
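A hedged usage sketch for index_bloomberg_billionaires_hist above, assuming a top-level export; column names differ by year on the source pages, which is why the rename map in the code covers several aliases:

import akshare as ak

df = ak.index_bloomberg_billionaires_hist(year="2019")
print(df[["rank", "name", "total_net_worth"]].head())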
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_bloomberg.py
|
index_bloomberg_billionaires
|
()
|
return temp_df
|
Bloomberg Billionaires Index
https://www.bloomberg.com/billionaires/
:return: 彭博亿万富豪指数
:rtype: pandas.DataFrame
|
Bloomberg Billionaires Index
https://www.bloomberg.com/billionaires/
:return: 彭博亿万富豪指数
:rtype: pandas.DataFrame
| 64 | 106 |
def index_bloomberg_billionaires() -> pd.DataFrame:
"""
Bloomberg Billionaires Index
https://www.bloomberg.com/billionaires/
:return: 彭博亿万富豪指数
:rtype: pandas.DataFrame
"""
url = "https://www.bloomberg.com/billionaires"
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"pragma": "no-cache",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"referer": "https://www.bloomberg.com/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
big_content_list = list()
soup_node = soup.find(attrs={"class": "table-chart"}).find_all(
attrs={"class": "table-row"}
)
for row in soup_node:
temp_content_list = row.text.strip().replace("\n", "").split(" ")
content_list = [item for item in temp_content_list if item != ""]
big_content_list.append(content_list)
temp_df = pd.DataFrame(big_content_list)
temp_df.columns = [
"rank",
"name",
"total_net_worth",
"last_change",
"YTD_change",
"country",
"industry",
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_bloomberg.py#L64-L106
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 16.27907 |
[
7,
8,
22,
23,
24,
25,
28,
29,
30,
31,
32,
33,
42
] | 30.232558 | false | 14.583333 | 43 | 3 | 69.767442 | 4 |
def index_bloomberg_billionaires() -> pd.DataFrame:
url = "https://www.bloomberg.com/billionaires"
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"pragma": "no-cache",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"referer": "https://www.bloomberg.com/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
big_content_list = list()
soup_node = soup.find(attrs={"class": "table-chart"}).find_all(
attrs={"class": "table-row"}
)
for row in soup_node:
temp_content_list = row.text.strip().replace("\n", "").split(" ")
content_list = [item for item in temp_content_list if item != ""]
big_content_list.append(content_list)
temp_df = pd.DataFrame(big_content_list)
temp_df.columns = [
"rank",
"name",
"total_net_worth",
"last_change",
"YTD_change",
"country",
"industry",
]
return temp_df
| 18,580 |
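A usage sketch for index_bloomberg_billionaires above; the browser-like headers exist because bloomberg.com tends to reject bare clients, so the call may still fail behind aggressive bot protection:

import akshare as ak

df = ak.index_bloomberg_billionaires()
print(df.head())  # rank, name, total_net_worth, last_change, YTD_change, country, industry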
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_xincaifu_500.py
|
xincaifu_rank
|
(year: str = "2022")
|
return temp_df
|
新财富 500 人富豪榜
http://www.xcf.cn/zhuanti/ztzz/hdzt1/500frb/index.html
:param year: 具体排名年份, 数据从 2003-至今
:type year: str
:return: 排行榜
:rtype: pandas.DataFrame
|
新财富 500 人富豪榜
http://www.xcf.cn/zhuanti/ztzz/hdzt1/500frb/index.html
:param year: 具体排名年份, 数据从 2003-至今
:type year: str
:return: 排行榜
:rtype: pandas.DataFrame
| 14 | 68 |
def xincaifu_rank(year: str = "2022") -> pd.DataFrame:
"""
新财富 500 人富豪榜
http://www.xcf.cn/zhuanti/ztzz/hdzt1/500frb/index.html
:param year: 具体排名年份, 数据从 2003-至今
:type year: str
:return: 排行榜
:rtype: pandas.DataFrame
"""
url = "http://service.ikuyu.cn/XinCaiFu2/pcremoting/bdListAction.do"
params = {
"method": "getPage",
"callback": "jsonpCallback",
"sortBy": "",
"order": "",
"type": "4",
"keyword": "",
"pageSize": "1000",
"year": year,
"pageNo": "1",
"from": "jsonp",
"_": "1604722171732",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : -1])
temp_df = pd.DataFrame(data_json["data"]["rows"])
temp_df.rename(columns={
'assets': "财富",
'year': "年份",
'sex': "性别",
'name': "姓名",
'rank': "排名",
'company': "主要公司",
'industry': "相关行业",
'id': "-",
'addr': "公司总部",
'rankLst': "-",
'age': "年龄",
}, inplace=True)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"主要公司",
"相关行业",
"公司总部",
"性别",
"年龄",
"年份",
]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_xincaifu_500.py#L14-L68
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.363636 |
[
9,
10,
23,
24,
25,
26,
27,
28,
41,
54
] | 18.181818 | false | 33.333333 | 55 | 1 | 81.818182 | 6 |
def xincaifu_rank(year: str = "2022") -> pd.DataFrame:
url = "http://service.ikuyu.cn/XinCaiFu2/pcremoting/bdListAction.do"
params = {
"method": "getPage",
"callback": "jsonpCallback",
"sortBy": "",
"order": "",
"type": "4",
"keyword": "",
"pageSize": "1000",
"year": year,
"pageNo": "1",
"from": "jsonp",
"_": "1604722171732",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : -1])
temp_df = pd.DataFrame(data_json["data"]["rows"])
temp_df.rename(columns={
'assets': "财富",
'year': "年份",
'sex': "性别",
'name': "姓名",
'rank': "排名",
'company': "主要公司",
'industry': "相关行业",
'id': "-",
'addr': "公司总部",
'rankLst': "-",
'age': "年龄",
}, inplace=True)
temp_df = temp_df[
[
"排名",
"财富",
"姓名",
"主要公司",
"相关行业",
"公司总部",
"性别",
"年龄",
"年份",
]
]
return temp_df
| 18,581 |
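A usage sketch for xincaifu_rank above, assuming a top-level export; the endpoint returns JSONP, which the code unwraps by slicing from the first "{" to the character before the trailing ")":

import akshare as ak

df = ak.xincaifu_rank(year="2020")
print(df.head())  # 排名, 财富, 姓名, 主要公司, 相关行业, 公司总部, 性别, 年龄, 年份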
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_500.py
|
_fortune_rank_year_url_map
|
()
|
return year_url_map
|
年份和网址映射
https://www.fortunechina.com/fortune500/index.htm
:return: 年份和网址映射
:rtype: dict
|
年份和网址映射
https://www.fortunechina.com/fortune500/index.htm
:return: 年份和网址映射
:rtype: dict
| 20 | 34 |
def _fortune_rank_year_url_map() -> dict:
"""
年份和网址映射
https://www.fortunechina.com/fortune500/index.htm
:return: 年份和网址映射
:rtype: dict
"""
url = "http://www.fortunechina.com/fortune500/index.htm"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
node_list = soup.find_all('div', attrs={"class": "swiper-slide"})
url_list = [item.find("a")['href'] for item in node_list]
year_list = [item.find("a").text for item in node_list]
year_url_map = dict(zip(year_list, url_list))
return year_url_map
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_500.py#L20-L34
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 46.666667 |
[
7,
8,
9,
10,
11,
12,
13,
14
] | 53.333333 | false | 16.438356 | 15 | 3 | 46.666667 | 4 |
def _fortune_rank_year_url_map() -> dict:
url = "http://www.fortunechina.com/fortune500/index.htm"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
node_list = soup.find_all('div', attrs={"class": "swiper-slide"})
url_list = [item.find("a")['href'] for item in node_list]
year_list = [item.find("a").text for item in node_list]
year_url_map = dict(zip(year_list, url_list))
return year_url_map
| 18,582 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_500.py
|
fortune_rank
|
(year: str = "2015")
|
财富 500 强公司从 1996 年开始的排行榜
https://www.fortunechina.com/fortune500/index.htm
:param year: str 年份
:return: pandas.DataFrame
|
财富 500 强公司从 1996 年开始的排行榜
https://www.fortunechina.com/fortune500/index.htm
:param year: str 年份
:return: pandas.DataFrame
| 37 | 65 |
def fortune_rank(year: str = "2015") -> pd.DataFrame:
"""
财富 500 强公司从 1996 年开始的排行榜
https://www.fortunechina.com/fortune500/index.htm
:param year: str 年份
:return: pandas.DataFrame
"""
year_url_map = _fortune_rank_year_url_map()
url = year_url_map[year]
r = requests.get(url)
r.encoding = "utf-8"
if int(year) < 2007:
df = pd.read_html(r.text)[0].iloc[1:-1, ]
df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
return df
elif 2006 < int(year) < 2010:
df = pd.read_html(r.text)[0].iloc[1:, ]
df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
for page in tqdm(range(2, 11), leave=False):
r = requests.get(url.rsplit(".", maxsplit=1)[0] + "_" + str(page) + ".htm")
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0].iloc[1:, ]
temp_df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
df = pd.concat([df, temp_df], ignore_index=True)
return df
else:
df = pd.read_html(r.text)[0]
return df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_500.py#L37-L65
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 24.137931 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
20,
21,
22,
23,
24,
25,
27,
28
] | 68.965517 | false | 16.438356 | 29 | 4 | 31.034483 | 4 |
def fortune_rank(year: str = "2015") -> pd.DataFrame:
year_url_map = _fortune_rank_year_url_map()
url = year_url_map[year]
r = requests.get(url)
r.encoding = "utf-8"
if int(year) < 2007:
df = pd.read_html(r.text)[0].iloc[1:-1, ]
df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
return df
elif 2006 < int(year) < 2010:
df = pd.read_html(r.text)[0].iloc[1:, ]
df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
for page in tqdm(range(2, 11), leave=False):
r = requests.get(url.rsplit(".", maxsplit=1)[0] + "_" + str(page) + ".htm")
r.encoding = "utf-8"
temp_df = pd.read_html(r.text)[0].iloc[1:, ]
temp_df.columns = pd.read_html(r.text)[0].iloc[0, :].tolist()
df = pd.concat([df, temp_df], ignore_index=True)
return df
else:
df = pd.read_html(r.text)[0]
return df
| 18,583 |
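A usage sketch for fortune_rank above; _fortune_rank_year_url_map scrapes the year-to-URL mapping first, so valid years are whatever fortunechina.com currently lists (1996 onward). Pages before 2007 are a single table, 2007-2009 are paginated, and later years are one table:

import akshare as ak

df = ak.fortune_rank(year="2015")
print(df.head())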
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_500.py
|
fortune_rank_eng
|
(year: str = "1995")
|
return big_df
|
注意你的网速
https://fortune.com/global500/
https://fortune.com/global500/2012/search/
:param year: "1995"
:type year: str
:return: 历年排名
:rtype: pandas.DataFrame
|
注意你的网速
https://fortune.com/global500/
https://fortune.com/global500/2012/search/
:param year: "1995"
:type year: str
:return: 历年排名
:rtype: pandas.DataFrame
| 68 | 95 |
def fortune_rank_eng(year: str = "1995") -> pd.DataFrame:
"""
注意你的网速
https://fortune.com/global500/
https://fortune.com/global500/2012/search/
:param year: "1995"
:type year: str
:return: 历年排名
:rtype: pandas.DataFrame
"""
url = f"https://fortune.com/global500/{year}/search/"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
code = json.loads(soup.find("script", attrs={"type": "application/ld+json"}).string)["identifier"]
url = f"https://content.fortune.com/wp-json/irving/v1/data/franchise-search-results"
params = {
"list_id": code,
"token": "Zm9ydHVuZTpCcHNyZmtNZCN5SndjWkkhNHFqMndEOTM=",
}
res = requests.get(url, params=params)
big_df = pd.DataFrame()
for i in range(len(res.json()[1]["items"][0]['fields'])):
temp_df = pd.DataFrame([item["fields"][i] for item in res.json()[1]["items"]])
big_df[temp_df["key"].values[0]] = temp_df["value"]
big_df["rank"] = big_df["rank"].astype(int)
big_df.sort_values("rank", inplace=True)
big_df.reset_index(drop=True, inplace=True)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_500.py#L68-L95
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 35.714286 |
[
10,
11,
12,
13,
14,
15,
19,
20,
21,
22,
23,
24,
25,
26,
27
] | 53.571429 | false | 16.438356 | 28 | 3 | 46.428571 | 7 |
def fortune_rank_eng(year: str = "1995") -> pd.DataFrame:
url = f"https://fortune.com/global500/{year}/search/"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
code = json.loads(soup.find("script", attrs={"type": "application/ld+json"}).string)["identifier"]
url = f"https://content.fortune.com/wp-json/irving/v1/data/franchise-search-results"
params = {
"list_id": code,
"token": "Zm9ydHVuZTpCcHNyZmtNZCN5SndjWkkhNHFqMndEOTM=",
}
res = requests.get(url, params=params)
big_df = pd.DataFrame()
for i in range(len(res.json()[1]["items"][0]['fields'])):
temp_df = pd.DataFrame([item["fields"][i] for item in res.json()[1]["items"]])
big_df[temp_df["key"].values[0]] = temp_df["value"]
big_df["rank"] = big_df["rank"].astype(int)
big_df.sort_values("rank", inplace=True)
big_df.reset_index(drop=True, inplace=True)
return big_df
| 18,584 |
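A usage sketch for fortune_rank_eng above; the list id is read from the page's JSON-LD block and the hardcoded token is the one embedded by the site, so the call depends on both staying stable:

import akshare as ak

df = ak.fortune_rank_eng(year="1995")  # sorted by the integer "rank" column
print(df.head())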
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_it_juzi.py
|
death_company
|
()
|
return temp_df
|
此数据未更新
IT桔子-死亡公司名单
https://www.itjuzi.com/deathCompany
:return: 死亡公司名单
:rtype: pandas.DataFrame
|
此数据未更新
IT桔子-死亡公司名单
https://www.itjuzi.com/deathCompany
:return: 死亡公司名单
:rtype: pandas.DataFrame
| 13 | 35 |
def death_company() -> pd.DataFrame:
"""
此数据未更新
IT桔子-死亡公司名单
https://www.itjuzi.com/deathCompany
:return: 死亡公司名单
:rtype: pandas.DataFrame
"""
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/juzi.csv"
)
temp_df.reset_index(inplace=True, drop=True)
temp_df.columns = [
"公司简称",
"成立时间",
"关闭时间",
"存活天数",
"融资规模",
"行业",
"地点",
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_it_juzi.py#L13-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 34.782609 |
[
8,
12,
13,
22
] | 17.391304 | false | 10.714286 | 23 | 1 | 82.608696 | 5 |
def death_company() -> pd.DataFrame:
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/juzi.csv"
)
temp_df.reset_index(inplace=True, drop=True)
temp_df.columns = [
"公司简称",
"成立时间",
"关闭时间",
"存活天数",
"融资规模",
"行业",
"地点",
]
return temp_df
| 18,585 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_it_juzi.py
|
nicorn_company
|
()
|
return temp_df
|
此数据未更新
IT桔子-独角兽公司
https://www.itjuzi.com/unicorn
:return: 独角兽公司
:rtype: pandas.DataFrame
|
此数据未更新
IT桔子-独角兽公司
https://www.itjuzi.com/unicorn
:return: 独角兽公司
:rtype: pandas.DataFrame
| 38 | 74 |
def nicorn_company() -> pd.DataFrame:
"""
此数据未更新
IT桔子-独角兽公司
https://www.itjuzi.com/unicorn
:return: 独角兽公司
:rtype: pandas.DataFrame
"""
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/nicorn_company.csv",
index_col=0,
)
temp_df.reset_index(drop=True, inplace=True)
del temp_df["com_id"]
del temp_df["com_logo_archive"]
del temp_df["com_city"]
del temp_df["invse_year"]
del temp_df["invse_month"]
del temp_df["invse_day"]
del temp_df["invse_guess_particulars"]
del temp_df["invse_detail_money"]
del temp_df["invse_currency_id"]
del temp_df["invse_similar_money_id"]
del temp_df["invse_round_id"]
del temp_df["money"]
del temp_df["invse_money"]
del temp_df["round"]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"公司",
"地区",
"行业",
"子行业",
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_it_juzi.py#L38-L74
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 21.621622 |
[
8,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
36
] | 54.054054 | false | 10.714286 | 37 | 1 | 45.945946 | 5 |
def nicorn_company() -> pd.DataFrame:
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/nicorn_company.csv",
index_col=0,
)
temp_df.reset_index(drop=True, inplace=True)
del temp_df["com_id"]
del temp_df["com_logo_archive"]
del temp_df["com_city"]
del temp_df["invse_year"]
del temp_df["invse_month"]
del temp_df["invse_day"]
del temp_df["invse_guess_particulars"]
del temp_df["invse_detail_money"]
del temp_df["invse_currency_id"]
del temp_df["invse_similar_money_id"]
del temp_df["invse_round_id"]
del temp_df["money"]
del temp_df["invse_money"]
del temp_df["round"]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"公司",
"地区",
"行业",
"子行业",
]
return temp_df
| 18,586 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/fortune/fortune_it_juzi.py
|
maxima_company
|
()
|
return temp_df
|
此数据未更新
IT桔子-千里马公司
https://www.itjuzi.com/chollima
:return: 千里马公司
:rtype: pandas.DataFrame
|
此数据未更新
IT桔子-千里马公司
https://www.itjuzi.com/chollima
:return: 千里马公司
:rtype: pandas.DataFrame
| 77 | 112 |
def maxima_company() -> pd.DataFrame:
"""
此数据未更新
IT桔子-千里马公司
https://www.itjuzi.com/chollima
:return: 千里马公司
:rtype: pandas.DataFrame
"""
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/maxima.csv",
index_col=0,
)
temp_df.reset_index(drop=True, inplace=True)
del temp_df["com_id"]
del temp_df["com_logo_archive"]
del temp_df["com_scope_id"]
del temp_df["invse_year"]
del temp_df["invse_month"]
del temp_df["invse_day"]
del temp_df["invse_similar_money_id"]
del temp_df["invse_guess_particulars"]
del temp_df["invse_detail_money"]
del temp_df["invse_currency_id"]
del temp_df["invse_round_id"]
del temp_df["money"]
del temp_df["invse_money"]
del temp_df["round"]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"公司",
"行业",
"地区",
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/fortune/fortune_it_juzi.py#L77-L112
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 22.222222 |
[
8,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
35
] | 55.555556 | false | 10.714286 | 36 | 1 | 44.444444 | 5 |
def maxima_company() -> pd.DataFrame:
temp_df = pd.read_csv(
"https://jfds-1252952517.cos.ap-chengdu.myqcloud.com/akshare/data/data_juzi/maxima.csv",
index_col=0,
)
temp_df.reset_index(drop=True, inplace=True)
del temp_df["com_id"]
del temp_df["com_logo_archive"]
del temp_df["com_scope_id"]
del temp_df["invse_year"]
del temp_df["invse_month"]
del temp_df["invse_day"]
del temp_df["invse_similar_money_id"]
del temp_df["invse_guess_particulars"]
del temp_df["invse_detail_money"]
del temp_df["invse_currency_id"]
del temp_df["invse_round_id"]
del temp_df["money"]
del temp_df["invse_money"]
del temp_df["round"]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"公司",
"行业",
"地区",
]
return temp_df
| 18,587 |
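The three IT桔子 loaders above (death_company, nicorn_company, maxima_company) all read static CSV snapshots from the project's COS bucket, so they need no cookies or JS; a combined usage sketch, assuming top-level exports:

import akshare as ak

dead_df = ak.death_company()    # 死亡公司名单
uni_df = ak.nicorn_company()    # 独角兽公司
fast_df = ak.maxima_company()   # 千里马公司
print(dead_df.head())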
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
_get_js_path
|
(name: str = None, module_file: str = None)
|
return module_json_path
|
获取 JS 文件的路径(从模块所在目录查找)
:param name: 文件名
:type name: str
:param module_file: 模块路径
:type module_file: str
:return: 路径
:rtype: str
|
获取 JS 文件的路径(从模块所在目录查找)
:param name: 文件名
:type name: str
:param module_file: 模块路径
:type module_file: str
:return: 路径
:rtype: str
| 21 | 35 |
def _get_js_path(name: str = None, module_file: str = None) -> str:
"""
获取 JS 文件的路径(从模块所在目录查找)
:param name: 文件名
:type name: str
:param module_file: 模块路径
:type module_file: str
:return: 路径
:rtype: str
"""
module_folder = os.path.abspath(
os.path.dirname(os.path.dirname(module_file))
)
module_json_path = os.path.join(module_folder, "air", name)
return module_json_path
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L21-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 66.666667 |
[
10,
13,
14
] | 20 | false | 14.285714 | 15 | 1 | 80 | 7 |
def _get_js_path(name: str = None, module_file: str = None) -> str:
module_folder = os.path.abspath(
os.path.dirname(os.path.dirname(module_file))
)
module_json_path = os.path.join(module_folder, "air", name)
return module_json_path
| 18,588 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
_get_file_content
|
(file_name: str = "crypto.js")
|
return file_data
|
获取 JS 文件的内容
:param file_name: JS 文件名
:type file_name: str
:return: 文件内容
:rtype: str
|
获取 JS 文件的内容
:param file_name: JS 文件名
:type file_name: str
:return: 文件内容
:rtype: str
| 38 | 50 |
def _get_file_content(file_name: str = "crypto.js") -> str:
"""
获取 JS 文件的内容
:param file_name: JS 文件名
:type file_name: str
:return: 文件内容
:rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L38-L50
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 61.538462 |
[
8,
9,
10,
11,
12
] | 38.461538 | false | 14.285714 | 13 | 2 | 61.538462 | 5 |
def _get_file_content(file_name: str = "crypto.js") -> str:
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
| 18,589 |
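_get_js_path and _get_file_content above implement a common packaging pattern: resolve a bundled asset relative to the installed module rather than the current working directory. A standalone sketch of the same idea (names here are illustrative, not part of akshare):

import os

def _asset_path(name: str, module_file: str) -> str:
    # two dirname() calls: from .../pkg/air/mod.py up to .../pkg, then into "air"
    folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
    return os.path.join(folder, "air", name)

# assumes an "air/crypto.js" asset ships alongside the package, as in akshare
print(_asset_path("crypto.js", __file__))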
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
has_month_data
|
(href)
|
return href and re.compile("monthdata.php").search(href)
|
Deal with href node
:param href: href
:type href: str
:return: href result
:rtype: str
|
Deal with href node
:param href: href
:type href: str
:return: href result
:rtype: str
| 53 | 61 |
def has_month_data(href):
"""
Deal with href node
:param href: href
:type href: str
:return: href result
:rtype: str
"""
return href and re.compile("monthdata.php").search(href)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L53-L61
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 88.888889 |
[
8
] | 11.111111 | false | 14.285714 | 9 | 2 | 88.888889 | 5 |
def has_month_data(href):
return href and re.compile("monthdata.php").search(href)
| 18,590 |
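has_month_data above is meant to be passed as an attribute filter to BeautifulSoup, which accepts a callable for href; a self-contained sketch with inline sample HTML:

import re
from bs4 import BeautifulSoup

def has_month_data(href):
    return href and re.compile("monthdata.php").search(href)

html = '<a href="monthdata.php?city=bj">月数据</a><a href="daydata.php">日数据</a>'
soup = BeautifulSoup(html, "lxml")
print([a["href"] for a in soup.find_all(href=has_month_data)])
# ['monthdata.php?city=bj']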
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
air_city_table
|
()
|
return temp_df
|
真气网-空气质量历史数据查询-全部城市列表
https://www.zq12369.com/environment.php?date=2019-06-05&tab=rank&order=DESC&type=DAY#rank
:return: 城市映射
:rtype: pandas.DataFrame
|
真气网-空气质量历史数据查询-全部城市列表
https://www.zq12369.com/environment.php?date=2019-06-05&tab=rank&order=DESC&type=DAY#rank
:return: 城市映射
:rtype: pandas.DataFrame
| 64 | 87 |
def air_city_table() -> pd.DataFrame:
"""
真气网-空气质量历史数据查询-全部城市列表
https://www.zq12369.com/environment.php?date=2019-06-05&tab=rank&order=DESC&type=DAY#rank
:return: 城市映射
:rtype: pandas.DataFrame
"""
url = "https://www.zq12369.com/environment.php"
date = "2020-05-01"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text)[1].iloc[1:, :]
del temp_df["降序"]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = ["序号", "省份", "城市", "AQI", "空气质量", "PM2.5浓度", "首要污染物"]
temp_df["AQI"] = pd.to_numeric(temp_df["AQI"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L64-L87
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 29.166667 |
[
7,
8,
9,
10,
16,
17,
18,
19,
20,
21,
22,
23
] | 50 | false | 14.285714 | 24 | 2 | 50 | 4 |
def air_city_table() -> pd.DataFrame:
url = "https://www.zq12369.com/environment.php"
date = "2020-05-01"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text)[1].iloc[1:, :]
del temp_df["降序"]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = ["序号", "省份", "城市", "AQI", "空气质量", "PM2.5浓度", "首要污染物"]
temp_df["AQI"] = pd.to_numeric(temp_df["AQI"])
return temp_df
| 18,591 |
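A usage sketch for air_city_table above, assuming a top-level export; the date is hardcoded to 2020-05-01 inside the function, so it only serves to enumerate cities, not to fetch a specific day:

import akshare as ak

city_df = ak.air_city_table()
print(city_df.head())  # 序号, 省份, 城市, AQI, 空气质量, PM2.5浓度, 首要污染物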
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
air_quality_watch_point
|
(
city: str = "杭州", start_date: str = "20220408", end_date: str = "20220409"
)
|
return temp_df
|
真气网-监测点空气质量-细化到具体城市的每个监测点
指定时间段之间的空气质量数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取
:type city: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., ""20200327""
:type end_date: str
:return: 指定城市指定日期区间的观测点空气质量
:rtype: pandas.DataFrame
|
真气网-监测点空气质量-细化到具体城市的每个监测点
指定时间段之间的空气质量数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取
:type city: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., ""20200327""
:type end_date: str
:return: 指定城市指定日期区间的观测点空气质量
:rtype: pandas.DataFrame
| 90 | 135 |
def air_quality_watch_point(
city: str = "杭州", start_date: str = "20220408", end_date: str = "20220409"
) -> pd.DataFrame:
"""
真气网-监测点空气质量-细化到具体城市的每个监测点
    指定时间段之间的空气质量数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取
:type city: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., ""20200327""
:type end_date: str
:return: 指定城市指定日期区间的观测点空气质量
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/zhenqiapi.php"
file_data = _get_file_content(file_name="crypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
method = "GETCITYPOINTAVG"
ctx.call("encode_param", method)
ctx.call("encode_param", start_date)
ctx.call("encode_param", end_date)
city_param = ctx.call("encode_param", city)
ctx.call("encode_secret", method, city_param, start_date, end_date)
payload = {
"appId": "a01901d3caba1f362d69474674ce477f",
"method": ctx.call("encode_param", method),
"city": city_param,
"startTime": ctx.call("encode_param", start_date),
"endTime": ctx.call("encode_param", end_date),
"secret": ctx.call(
"encode_secret", method, city_param, start_date, end_date
),
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
}
r = requests.post(url, data=payload, headers=headers)
data_text = r.text
data_json = demjson.decode(ctx.call("decode_result", data_text))
temp_df = pd.DataFrame(data_json["rows"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L90-L135
| 25 |
[
0
] | 2.173913 |
[
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
38,
41,
42,
43,
44,
45
] | 41.304348 | false | 14.285714 | 46 | 1 | 58.695652 | 11 |
def air_quality_watch_point(
city: str = "杭州", start_date: str = "20220408", end_date: str = "20220409"
) -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/zhenqiapi.php"
file_data = _get_file_content(file_name="crypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
method = "GETCITYPOINTAVG"
ctx.call("encode_param", method)
ctx.call("encode_param", start_date)
ctx.call("encode_param", end_date)
city_param = ctx.call("encode_param", city)
ctx.call("encode_secret", method, city_param, start_date, end_date)
payload = {
"appId": "a01901d3caba1f362d69474674ce477f",
"method": ctx.call("encode_param", method),
"city": city_param,
"startTime": ctx.call("encode_param", start_date),
"endTime": ctx.call("encode_param", end_date),
"secret": ctx.call(
"encode_secret", method, city_param, start_date, end_date
),
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
}
r = requests.post(url, data=payload, headers=headers)
data_text = r.text
data_json = demjson.decode(ctx.call("decode_result", data_text))
temp_df = pd.DataFrame(data_json["rows"])
return temp_df
| 18,592 |
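A usage sketch for air_quality_watch_point above, assuming a top-level export; note that it executes the site's crypto.js through py_mini_racer to sign the request, so it breaks if zq12369.com rotates its JS:

import akshare as ak

df = ak.air_quality_watch_point(
    city="杭州", start_date="20220408", end_date="20220409"
)
print(df.head())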
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
air_quality_hist
|
(
city: str = "杭州",
period: str = "day",
start_date: str = "20190327",
end_date: str = "20200427",
)
|
return temp_df
|
真气网-空气历史数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取所有城市列表
:type city: str
:param period: "hour": 每小时一个数据, 由于数据量比较大, 下载较慢; "day": 每天一个数据; "month": 每个月一个数据
:type period: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., "20200327"
:type end_date: str
:return: 指定城市和数据频率下在指定时间段内的空气质量数据
:rtype: pandas.DataFrame
|
真气网-空气历史数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取所有城市列表
:type city: str
:param period: "hour": 每小时一个数据, 由于数据量比较大, 下载较慢; "day": 每天一个数据; "month": 每个月一个数据
:type period: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., "20200327"
:type end_date: str
:return: 指定城市和数据频率下在指定时间段内的空气质量数据
:rtype: pandas.DataFrame
| 138 | 233 |
def air_quality_hist(
city: str = "杭州",
period: str = "day",
start_date: str = "20190327",
end_date: str = "20200427",
) -> pd.DataFrame:
"""
真气网-空气历史数据
https://www.zq12369.com/
:param city: 调用 ak.air_city_table() 接口获取所有城市列表
:type city: str
:param period: "hour": 每小时一个数据, 由于数据量比较大, 下载较慢; "day": 每天一个数据; "month": 每个月一个数据
:type period: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., "20200327"
:type end_date: str
:return: 指定城市和数据频率下在指定时间段内的空气质量数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/newzhenqiapi.php"
file_data = _get_file_content(file_name="outcrypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
appId = "4f0e3a273d547ce6b7147bfa7ceb4b6e"
method = "CETCITYPERIOD"
timestamp = ctx.eval("timestamp = new Date().getTime()")
p_text = json.dumps(
{
"city": city,
"endTime": f"{end_date} 23:45:39",
"startTime": f"{start_date} 00:00:00",
"type": period.upper(),
},
ensure_ascii=False,
indent=None,
).replace(' "', '"')
secret = ctx.call(
"hex_md5", appId + method + str(timestamp) + "WEB" + p_text
)
payload = {
"appId": "4f0e3a273d547ce6b7147bfa7ceb4b6e",
"method": "CETCITYPERIOD",
"timestamp": int(timestamp),
"clienttype": "WEB",
"object": {
"city": city,
"type": period.upper(),
"startTime": f"{start_date} 00:00:00",
"endTime": f"{end_date} 23:45:39",
},
"secret": secret,
}
need = (
json.dumps(payload, ensure_ascii=False, indent=None, sort_keys=False)
.replace(' "', '"')
.replace("\\", "")
.replace('p": ', 'p":')
.replace('t": ', 't":')
)
headers = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'Cache-Control': 'no-cache',
# 'Connection': 'keep-alive',
# 'Content-Length': '1174',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Cookie': 'UM_distinctid=1800e5142c5b85-04b8f11aa852f3-1a343370-1fa400-1800e5142c6b7e; CNZZDATA1254317176=1502593570-1649496979-%7C1649507817; city=%E6%9D%AD%E5%B7%9E; SECKEY_ABVK=eSrbUhd28Mjo7jf8Rfh+uY5E9C+tAhQ8mOfYJHSjSfY%3D; BMAP_SECKEY=N5fGcwdWpeJW46eZRpR9GW3qdVnODGQwGm6JE0ELECQHJOTFc9MCuNdyf8OWUspFI6Xq4MMPxgVVr5I13odFOW6AQMgSPOtEvVHciC2NsQwb1pnmFtEaqyKHOUeavelt0ejBy6ETRD_4FXAhZb9FSbVIMPew7qwFX_kdPDxVJH-vHfCVhRx9XDZgb41B_T4D',
# 'Host': 'www.zq12369.com',
# 'Origin': 'https://www.zq12369.com',
# 'Pragma': 'no-cache',
# 'Referer': 'https://www.zq12369.com/environment.php?catid=4',
# 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"Windows"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {"param": ctx.call("AES.encrypt", need)}
params = {"param": ctx.call("encode_param", need)}
r = requests.post(url, data=params, headers=headers)
temp_text = ctx.call("decryptData", r.text)
data_json = demjson.decode(ctx.call("b.decode", temp_text))
temp_df = pd.DataFrame(data_json["result"]["data"]["rows"])
temp_df.index = temp_df["time"]
del temp_df["time"]
temp_df = temp_df.astype(float, errors="ignore")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L138-L233
| 25 |
[
0
] | 1.041667 |
[
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
39,
42,
55,
63,
85,
86,
88,
89,
90,
91,
92,
93,
94,
95
] | 25 | false | 14.285714 | 96 | 1 | 75 | 12 |
def air_quality_hist(
city: str = "杭州",
period: str = "day",
start_date: str = "20190327",
end_date: str = "20200427",
) -> pd.DataFrame:
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/newzhenqiapi.php"
file_data = _get_file_content(file_name="outcrypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
appId = "4f0e3a273d547ce6b7147bfa7ceb4b6e"
method = "CETCITYPERIOD"
timestamp = ctx.eval("timestamp = new Date().getTime()")
p_text = json.dumps(
{
"city": city,
"endTime": f"{end_date} 23:45:39",
"startTime": f"{start_date} 00:00:00",
"type": period.upper(),
},
ensure_ascii=False,
indent=None,
).replace(' "', '"')
secret = ctx.call(
"hex_md5", appId + method + str(timestamp) + "WEB" + p_text
)
payload = {
"appId": "4f0e3a273d547ce6b7147bfa7ceb4b6e",
"method": "CETCITYPERIOD",
"timestamp": int(timestamp),
"clienttype": "WEB",
"object": {
"city": city,
"type": period.upper(),
"startTime": f"{start_date} 00:00:00",
"endTime": f"{end_date} 23:45:39",
},
"secret": secret,
}
need = (
json.dumps(payload, ensure_ascii=False, indent=None, sort_keys=False)
.replace(' "', '"')
.replace("\\", "")
.replace('p": ', 'p":')
.replace('t": ', 't":')
)
headers = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'Cache-Control': 'no-cache',
# 'Connection': 'keep-alive',
# 'Content-Length': '1174',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Cookie': 'UM_distinctid=1800e5142c5b85-04b8f11aa852f3-1a343370-1fa400-1800e5142c6b7e; CNZZDATA1254317176=1502593570-1649496979-%7C1649507817; city=%E6%9D%AD%E5%B7%9E; SECKEY_ABVK=eSrbUhd28Mjo7jf8Rfh+uY5E9C+tAhQ8mOfYJHSjSfY%3D; BMAP_SECKEY=N5fGcwdWpeJW46eZRpR9GW3qdVnODGQwGm6JE0ELECQHJOTFc9MCuNdyf8OWUspFI6Xq4MMPxgVVr5I13odFOW6AQMgSPOtEvVHciC2NsQwb1pnmFtEaqyKHOUeavelt0ejBy6ETRD_4FXAhZb9FSbVIMPew7qwFX_kdPDxVJH-vHfCVhRx9XDZgb41B_T4D',
# 'Host': 'www.zq12369.com',
# 'Origin': 'https://www.zq12369.com',
# 'Pragma': 'no-cache',
# 'Referer': 'https://www.zq12369.com/environment.php?catid=4',
# 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"Windows"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {"param": ctx.call("AES.encrypt", need)}
params = {"param": ctx.call("encode_param", need)}
r = requests.post(url, data=params, headers=headers)
temp_text = ctx.call("decryptData", r.text)
data_json = demjson.decode(ctx.call("b.decode", temp_text))
temp_df = pd.DataFrame(data_json["result"]["data"]["rows"])
temp_df.index = temp_df["time"]
del temp_df["time"]
temp_df = temp_df.astype(float, errors="ignore")
return temp_df
| 18,593 |
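A usage sketch for air_quality_hist above, assuming a top-level export; period selects the sampling granularity, and "hour" downloads are noticeably slower:

import akshare as ak

df = ak.air_quality_hist(
    city="杭州",
    period="day",        # "hour" | "day" | "month"
    start_date="20200327",
    end_date="20200427",
)
print(df.head())  # indexed by time, numeric columns cast to float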
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_zhenqi.py
|
air_quality_rank
|
(date: str = "")
|
真气网-168 城市 AQI 排行榜
https://www.zq12369.com/environment.php?date=2020-03-12&tab=rank&order=DESC&type=DAY#rank
:param date: "": 当前时刻空气质量排名; "20200312": 当日空气质量排名; "202003": 当月空气质量排名; "2019": 当年空气质量排名;
:type date: str
:return: 指定 date 类型的空气质量排名数据
:rtype: pandas.DataFrame
|
真气网-168 城市 AQI 排行榜
https://www.zq12369.com/environment.php?date=2020-03-12&tab=rank&order=DESC&type=DAY#rank
:param date: "": 当前时刻空气质量排名; "20200312": 当日空气质量排名; "202003": 当月空气质量排名; "2019": 当年空气质量排名;
:type date: str
:return: 指定 date 类型的空气质量排名数据
:rtype: pandas.DataFrame
| 236 | 290 |
def air_quality_rank(date: str = "") -> pd.DataFrame:
"""
真气网-168 城市 AQI 排行榜
https://www.zq12369.com/environment.php?date=2020-03-12&tab=rank&order=DESC&type=DAY#rank
:param date: "": 当前时刻空气质量排名; "20200312": 当日空气质量排名; "202003": 当月空气质量排名; "2019": 当年空气质量排名;
:type date: str
:return: 指定 date 类型的空气质量排名数据
:rtype: pandas.DataFrame
"""
if len(date) == 4:
date = date
elif len(date) == 6:
date = "-".join([date[:4], date[4:6]])
elif date == "":
date = "实时"
else:
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://www.zq12369.com/environment.php"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[1].iloc[1:, :]
elif len(date.split("-")) == 2:
params = {
"month": date,
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[2].iloc[1:, :]
elif len(date.split("-")) == 1 and date != "实时":
params = {
"year": date,
"tab": "rank",
"order": "DESC",
"type": "YEAR",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[3].iloc[1:, :]
if date == "实时":
params = {
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[0].iloc[1:, :]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_zhenqi.py#L236-L290
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.363636 |
[
9,
10,
11,
12,
13,
14,
16,
18,
20,
21,
27,
28,
29,
30,
36,
37,
38,
39,
45,
46,
47,
48,
53,
54
] | 43.636364 | false | 14.285714 | 55 | 9 | 56.363636 | 6 |
def air_quality_rank(date: str = "") -> pd.DataFrame:
if len(date) == 4:
date = date
elif len(date) == 6:
date = "-".join([date[:4], date[4:6]])
elif date == "":
date = "实时"
else:
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://www.zq12369.com/environment.php"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[1].iloc[1:, :]
elif len(date.split("-")) == 2:
params = {
"month": date,
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[2].iloc[1:, :]
elif len(date.split("-")) == 1 and date != "实时":
params = {
"year": date,
"tab": "rank",
"order": "DESC",
"type": "YEAR",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[3].iloc[1:, :]
if date == "实时":
params = {
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[0].iloc[1:, :]
| 18,594 |
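A usage sketch for air_quality_rank above, assuming a top-level export; the shape of the date string selects the ranking period:

import akshare as ak

print(ak.air_quality_rank(date="").head())           # 实时排名
print(ak.air_quality_rank(date="20200312").head())   # 当日排名
print(ak.air_quality_rank(date="202003").head())     # 当月排名
print(ak.air_quality_rank(date="2019").head())       # 当年排名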
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/air_hebei.py
|
air_quality_hebei
|
(symbol: str = "唐山市") -> pd
|
河北省空气质量预报信息发布系统-空气质量预报, 未来 6 天
http://110.249.223.67/publish/
:param symbol: choice of {'石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市'}
:type symbol: str
:return: city = "", 返回所有地区的数据; city="唐山市", 返回唐山市的数据
:rtype: pandas.DataFrame
|
河北省空气质量预报信息发布系统-空气质量预报, 未来 6 天
http://110.249.223.67/publish/
:param symbol: choice of {'石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市'}
:type symbol: str
:return: city = "", 返回所有地区的数据; city="唐山市", 返回唐山市的数据
:rtype: pandas.DataFrame
| 24 | 76 |
def air_quality_hebei(symbol: str = "唐山市") -> pd.DataFrame:
"""
河北省空气质量预报信息发布系统-空气质量预报, 未来 6 天
http://110.249.223.67/publish/
:param symbol: choice of {'石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市'}
:type symbol: str
    :return: symbol="", 返回所有地区的数据; symbol="唐山市", 返回唐山市的数据
:rtype: pandas.DataFrame
"""
url = "http://110.249.223.67/server/api/CityPublishInfo/GetProvinceAndCityPublishData"
params = {"publishDate": f"{datetime.today().strftime('%Y-%m-%d')} 16:00:00"}
r = requests.get(url, params=params)
json_data = r.json()
city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")[
"CityName"
].tolist()
outer_df = pd.DataFrame()
for i in tqdm(range(1, 7), leave=False):
inner_df = pd.DataFrame(
[item[f"Date{i}"] for item in json_data["cityPublishDatas"]],
index=city_list,
)
outer_df = pd.concat([outer_df, inner_df])
if symbol == "":
temp_df = outer_df.reset_index()
temp_df.columns = [
'city',
'date',
'pollutant',
            'minaqi',
            'maxaqi',
'level',
]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['minaqi'] = pd.to_numeric(temp_df['minaqi'])
temp_df['maxaqi'] = pd.to_numeric(temp_df['maxaqi'])
return temp_df
else:
temp_df = outer_df.reset_index()
temp_df.columns = [
'city',
'date',
'pollutant',
'minaqi',
'maxaqi',
'level',
]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['minaqi'] = pd.to_numeric(temp_df['minaqi'])
temp_df['maxaqi'] = pd.to_numeric(temp_df['maxaqi'])
temp_df = temp_df[temp_df['city'] == symbol]
temp_df.reset_index(inplace=True, drop=True)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/air_hebei.py#L24-L76
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 16.981132 |
[
9,
10,
11,
12,
13,
16,
17,
18,
22,
23,
24,
25,
33,
34,
35,
36,
38,
39,
47,
48,
49,
50,
51,
52
] | 45.283019 | false | 21.212121 | 53 | 4 | 54.716981 | 6 |
def air_quality_hebei(symbol: str = "唐山市") -> pd.DataFrame:
url = "http://110.249.223.67/server/api/CityPublishInfo/GetProvinceAndCityPublishData"
params = {"publishDate": f"{datetime.today().strftime('%Y-%m-%d')} 16:00:00"}
r = requests.get(url, params=params)
json_data = r.json()
city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")[
"CityName"
].tolist()
outer_df = pd.DataFrame()
for i in tqdm(range(1, 7), leave=False):
inner_df = pd.DataFrame(
[item[f"Date{i}"] for item in json_data["cityPublishDatas"]],
index=city_list,
)
outer_df = pd.concat([outer_df, inner_df])
if symbol == "":
temp_df = outer_df.reset_index()
temp_df.columns = [
'city',
'date',
'pollutant',
            'minaqi',
            'maxaqi',
'level',
]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['minaqi'] = pd.to_numeric(temp_df['minaqi'])
temp_df['maxaqi'] = pd.to_numeric(temp_df['maxaqi'])
return temp_df
else:
temp_df = outer_df.reset_index()
temp_df.columns = [
'city',
'date',
'pollutant',
'minaqi',
'maxaqi',
'level',
]
temp_df['date'] = pd.to_datetime(temp_df['date']).dt.date
temp_df['minaqi'] = pd.to_numeric(temp_df['minaqi'])
temp_df['maxaqi'] = pd.to_numeric(temp_df['maxaqi'])
temp_df = temp_df[temp_df['city'] == symbol]
temp_df.reset_index(inplace=True, drop=True)
return temp_df
| 18,595 |
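A usage sketch for air_quality_hebei above, assuming a top-level export; symbol="" returns the 6-day forecast for every city, otherwise only the named city:

import akshare as ak

df = ak.air_quality_hebei(symbol="唐山市")
print(df.head())  # city, date, pollutant, minaqi, maxaqi, level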
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/time_and_date.py
|
sunrise_city_list
|
()
|
return city_list
|
查询日出与日落数据的城市列表
:return: 所有可以获取的数据的城市列表
:rtype: list
|
查询日出与日落数据的城市列表
:return: 所有可以获取的数据的城市列表
:rtype: list
| 13 | 31 |
def sunrise_city_list() -> list:
"""
查询日出与日落数据的城市列表
:return: 所有可以获取的数据的城市列表
:rtype: list
"""
url = "https://www.timeanddate.com/sun/china"
res = requests.get(url)
city_list = []
china_city_one_df = pd.read_html(res.text)[1]
china_city_two_df = pd.read_html(res.text)[2]
city_list.extend([item.lower() for item in china_city_one_df.iloc[:, 0].tolist()])
city_list.extend([item.lower() for item in china_city_one_df.iloc[:, 3].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 0].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 1].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 2].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 3].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 4][:-2].tolist()])
return city_list
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/time_and_date.py#L13-L31
| 25 |
[
0,
1,
2,
3,
4,
5
] | 31.578947 |
[
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18
] | 68.421053 | false | 17.391304 | 19 | 8 | 31.578947 | 3 |
def sunrise_city_list() -> list:
url = "https://www.timeanddate.com/sun/china"
res = requests.get(url)
city_list = []
china_city_one_df = pd.read_html(res.text)[1]
china_city_two_df = pd.read_html(res.text)[2]
city_list.extend([item.lower() for item in china_city_one_df.iloc[:, 0].tolist()])
city_list.extend([item.lower() for item in china_city_one_df.iloc[:, 3].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 0].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 1].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 2].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 3].tolist()])
city_list.extend([item.lower() for item in china_city_two_df.iloc[:, 4][:-2].tolist()])
return city_list
| 18,596 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/time_and_date.py
|
sunrise_daily
|
(date: str = "20200428", city: str = "北京") ->
|
每日日出日落数据
https://www.timeanddate.com/sun/china/shaoxing
:param date: 需要查询的日期, e.g., “20200428”
:type date: str
:param city: 需要查询的城市; 注意输入的格式, e.g., "北京", "上海"
:type city: str
:return: 返回指定日期指定地区的日出日落数据
:rtype: pandas.DataFrame
|
每日日出日落数据
https://www.timeanddate.com/sun/china/shaoxing
:param date: 需要查询的日期, e.g., “20200428”
:type date: str
:param city: 需要查询的城市; 注意输入的格式, e.g., "北京", "上海"
:type city: str
:return: 返回指定日期指定地区的日出日落数据
:rtype: pandas.DataFrame
| 34 | 56 |
def sunrise_daily(date: str = "20200428", city: str = "北京") -> pd.DataFrame:
"""
每日日出日落数据
https://www.timeanddate.com/sun/china/shaoxing
:param date: 需要查询的日期, e.g., “20200428”
:type date: str
:param city: 需要查询的城市; 注意输入的格式, e.g., "北京", "上海"
:type city: str
:return: 返回指定日期指定地区的日出日落数据
:rtype: pandas.DataFrame
"""
if pypinyin.slug(city, separator='') in sunrise_city_list():
year = date[:4]
month = date[4:6]
url = f"https://www.timeanddate.com/sun/china/{pypinyin.slug(city, separator='')}?month={month}&year={year}"
res = requests.get(url)
table = pd.read_html(res.text, header=2)[1]
month_df = table.iloc[:-1, ]
day_df = month_df[month_df.iloc[:, 0].astype(str).str.zfill(2) == date[6:]]
day_df.index = pd.to_datetime([date] * len(day_df), format="%Y%m%d")
return day_df
else:
return "请输入正确的城市名称"
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/time_and_date.py#L34-L56
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 47.826087 |
[
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
22
] | 47.826087 | false | 17.391304 | 23 | 2 | 52.173913 | 8 |
def sunrise_daily(date: str = "20200428", city: str = "北京") -> pd.DataFrame:
if pypinyin.slug(city, separator='') in sunrise_city_list():
year = date[:4]
month = date[4:6]
url = f"https://www.timeanddate.com/sun/china/{pypinyin.slug(city, separator='')}?month={month}&year={year}"
res = requests.get(url)
table = pd.read_html(res.text, header=2)[1]
month_df = table.iloc[:-1, ]
day_df = month_df[month_df.iloc[:, 0].astype(str).str.zfill(2) == date[6:]]
day_df.index = pd.to_datetime([date] * len(day_df), format="%Y%m%d")
return day_df
else:
return "请输入正确的城市名称"
| 18,597 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/air/time_and_date.py
|
sunrise_monthly
|
(date: str = "20190801", city: str = "北京") ->
|
Daily sunrise and sunset data for every day of the month containing the given date; if the current month is not yet over, the remaining days are filled with predicted values
https://www.timeanddate.com/sun/china/shaoxing
:param date: the date to query, used here to pick the month containing date, e.g., "20200428"
:type date: str
:param city: the city to query; mind the input format, e.g., "北京", "上海"
:type city: str
:return: daily sunrise and sunset data for the month containing date
:rtype: pandas.DataFrame
|
Daily sunrise and sunset data for every day of the month containing the given date; if the current month is not yet over, the remaining days are filled with predicted values
https://www.timeanddate.com/sun/china/shaoxing
:param date: the date to query, used here to pick the month containing date, e.g., "20200428"
:type date: str
:param city: the city to query; mind the input format, e.g., "北京", "上海"
:type city: str
:return: daily sunrise and sunset data for the month containing date
:rtype: pandas.DataFrame
| 59 | 80 |
def sunrise_monthly(date: str = "20190801", city: str = "北京") -> pd.DataFrame:
"""
Daily sunrise and sunset data for every day of the month containing the given date; if the current month is not yet over, the remaining days are filled with predicted values
https://www.timeanddate.com/sun/china/shaoxing
:param date: the date to query, used here to pick the month containing date, e.g., "20200428"
:type date: str
:param city: the city to query; mind the input format, e.g., "北京", "上海"
:type city: str
:return: daily sunrise and sunset data for the month containing date
:rtype: pandas.DataFrame
"""
if pypinyin.slug(city, separator='') in sunrise_city_list():
year = date[:4]
month = date[4:6]
url = f"https://www.timeanddate.com/sun/china/{pypinyin.slug(city, separator='')}?month={month}&year={year}"
res = requests.get(url)
table = pd.read_html(res.text, header=2)[1]
month_df = table.iloc[:-1, ]
month_df.index = [date[:-2]] * len(month_df)
return month_df
else:
return "请输入正确的城市名称"
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/air/time_and_date.py#L59-L80
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 50 |
[
11,
12,
13,
14,
15,
16,
17,
18,
19,
21
] | 45.454545 | false | 17.391304 | 22 | 2 | 54.545455 | 8 |
def sunrise_monthly(date: str = "20190801", city: str = "北京") -> pd.DataFrame:
if pypinyin.slug(city, separator='') in sunrise_city_list():
year = date[:4]
month = date[4:6]
url = f"https://www.timeanddate.com/sun/china/{pypinyin.slug(city, separator='')}?month={month}&year={year}"
res = requests.get(url)
table = pd.read_html(res.text, header=2)[1]
month_df = table.iloc[:-1, ]
month_df.index = [date[:-2]] * len(month_df)
return month_df
else:
return "请输入正确的城市名称"
| 18,598 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency_safe.py
|
currency_boc_safe
|
()
|
return big_df
|
RMB central parity rate
http://www.safe.gov.cn/safe/rmbhlzjj/index.html
:return: RMB central parity rate
:rtype: pandas.DataFrame
|
RMB central parity rate
http://www.safe.gov.cn/safe/rmbhlzjj/index.html
:return: RMB central parity rate
:rtype: pandas.DataFrame
| 16 | 53 |
def currency_boc_safe() -> pd.DataFrame:
"""
RMB central parity rate
http://www.safe.gov.cn/safe/rmbhlzjj/index.html
:return: RMB central parity rate
:rtype: pandas.DataFrame
"""
url = "http://www.safe.gov.cn/safe/2020/1218/17833.html"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
content = soup.find("a", text=re.compile("人民币汇率"))["href"]
url = f"http://www.safe.gov.cn{content}"
temp_df = pd.read_excel(url)
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
start_date = (
(pd.Timestamp(temp_df["日期"].tolist()[-1]) + pd.Timedelta(days=1))
.isoformat()
.split("T")[0]
)
end_date = datetime.now().isoformat().split("T")[0]
url = "http://www.safe.gov.cn/AppStructured/hlw/RMBQuery.do"
payload = {
"startDate": start_date,
"endDate": end_date,
"queryYN": "true",
}
r = requests.post(url, data=payload)
current_temp_df = pd.read_html(r.text)[-1]
current_temp_df.sort_values(["日期"], inplace=True)
current_temp_df.reset_index(inplace=True, drop=True)
big_df = pd.concat([temp_df, current_temp_df], ignore_index=True)
column_name_list = big_df.columns[1:]
for item in column_name_list:
big_df[item] = pd.to_numeric(big_df[item], errors="coerce")
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency_safe.py#L16-L53
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 18.421053 |
[
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
21,
22,
23,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37
] | 60.526316 | false | 24.242424 | 38 | 2 | 39.473684 | 4 |
def currency_boc_safe() -> pd.DataFrame:
url = "http://www.safe.gov.cn/safe/2020/1218/17833.html"
r = requests.get(url)
r.encoding = "utf8"
soup = BeautifulSoup(r.text, "lxml")
content = soup.find("a", text=re.compile("人民币汇率"))["href"]
url = f"http://www.safe.gov.cn{content}"
temp_df = pd.read_excel(url)
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
start_date = (
(pd.Timestamp(temp_df["日期"].tolist()[-1]) + pd.Timedelta(days=1))
.isoformat()
.split("T")[0]
)
end_date = datetime.now().isoformat().split("T")[0]
url = "http://www.safe.gov.cn/AppStructured/hlw/RMBQuery.do"
payload = {
"startDate": start_date,
"endDate": end_date,
"queryYN": "true",
}
r = requests.post(url, data=payload)
current_temp_df = pd.read_html(r.text)[-1]
current_temp_df.sort_values(["日期"], inplace=True)
current_temp_df.reset_index(inplace=True, drop=True)
big_df = pd.concat([temp_df, current_temp_df], ignore_index=True)
column_name_list = big_df.columns[1:]
for item in column_name_list:
big_df[item] = pd.to_numeric(big_df[item], errors="coerce")
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
return big_df
| 18,599 |
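currency_boc_safe stitches an archived Excel file together with a live query that resumes one day after the archive ends. A self-contained sketch of that incremental-update pattern on synthetic frames (the column names here are illustrative only):

import pandas as pd

archive = pd.DataFrame({"date": pd.date_range("2024-01-01", periods=3), "rate": [7.10, 7.11, 7.12]})
# resume the live query the day after the archive ends, so the two ranges do not overlap
start = archive["date"].iloc[-1] + pd.Timedelta(days=1)
live = pd.DataFrame({"date": pd.date_range(start, periods=2), "rate": [7.13, 7.14]})
full = pd.concat([archive, live], ignore_index=True)
print(full)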
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency_china_bank_sina.py
|
_currency_boc_sina_map
|
(date: str = "20210614")
|
return data_dict
|
Mapping from FX currency names (symbols) to Sina money codes
:param date: trade date
:type date: str
:return: mapping from FX currency names to codes
:rtype: dict
|
Mapping from FX currency names (symbols) to Sina money codes
:param date: trade date
:type date: str
:return: mapping from FX currency names to codes
:rtype: dict
| 14 | 48 |
def _currency_boc_sina_map(date: str = "20210614") -> dict:
"""
Mapping from FX currency names (symbols) to Sina money codes
:param date: trade date
:type date: str
:return: mapping from FX currency names to codes
:rtype: dict
"""
url = "http://biz.finance.sina.com.cn/forex/forex.php"
params = {
"startdate": "2012-01-01",
"enddate": "-".join([date[:4], date[4:6], date[6:]]),
"money_code": "EUR",
"type": "0",
}
r = requests.get(url, params=params)
r.encoding = "gbk"
soup = BeautifulSoup(r.text, "lxml")
data_dict = dict(
zip(
[
item.text
for item in soup.find(attrs={"id": "money_code"}).find_all(
"option"
)
],
[
item["value"]
for item in soup.find(attrs={"id": "money_code"}).find_all(
"option"
)
],
)
)
return data_dict
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency_china_bank_sina.py#L14-L48
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 22.857143 |
[
8,
9,
15,
16,
17,
18,
34
] | 20 | false | 20.512821 | 35 | 3 | 80 | 5 |
def _currency_boc_sina_map(date: str = "20210614") -> dict:
url = "http://biz.finance.sina.com.cn/forex/forex.php"
params = {
"startdate": "2012-01-01",
"enddate": "-".join([date[:4], date[4:6], date[6:]]),
"money_code": "EUR",
"type": "0",
}
r = requests.get(url, params=params)
r.encoding = "gbk"
soup = BeautifulSoup(r.text, "lxml")
data_dict = dict(
zip(
[
item.text
for item in soup.find(attrs={"id": "money_code"}).find_all(
"option"
)
],
[
item["value"]
for item in soup.find(attrs={"id": "money_code"}).find_all(
"option"
)
],
)
)
return data_dict
| 18,600 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency_china_bank_sina.py
|
currency_boc_sina
|
(
symbol: str = "美元", date: str = "20210614"
)
|
return big_df
|
Sina Finance - historical Bank of China RMB exchange rate quotes
http://biz.finance.sina.com.cn/forex/forex.php?startdate=2012-01-01&enddate=2021-06-14&money_code=EUR&type=0
:param symbol: choice of {'美元', '英镑', '欧元', '澳门元', '泰国铢', '菲律宾比索', '港币', '瑞士法郎', '新加坡元', '瑞典克朗', '丹麦克朗', '挪威克朗', '日元', '加拿大元', '澳大利亚元', '新西兰元', '韩国元'}
:type symbol: str
:param date: trade date
:type date: str
:return: historical Bank of China RMB exchange rate quotes
:rtype: pandas.DataFrame
|
Sina Finance - historical Bank of China RMB exchange rate quotes
http://biz.finance.sina.com.cn/forex/forex.php?startdate=2012-01-01&enddate=2021-06-14&money_code=EUR&type=0
:param symbol: choice of {'美元', '英镑', '欧元', '澳门元', '泰国铢', '菲律宾比索', '港币', '瑞士法郎', '新加坡元', '瑞典克朗', '丹麦克朗', '挪威克朗', '日元', '加拿大元', '澳大利亚元', '新西兰元', '韩国元'}
:type symbol: str
:param date: trade date
:type date: str
:return: historical Bank of China RMB exchange rate quotes
:rtype: pandas.DataFrame
| 51 | 99 |
def currency_boc_sina(
symbol: str = "美元", date: str = "20210614"
) -> pd.DataFrame:
"""
Sina Finance - historical Bank of China RMB exchange rate quotes
http://biz.finance.sina.com.cn/forex/forex.php?startdate=2012-01-01&enddate=2021-06-14&money_code=EUR&type=0
:param symbol: choice of {'美元', '英镑', '欧元', '澳门元', '泰国铢', '菲律宾比索', '港币', '瑞士法郎', '新加坡元', '瑞典克朗', '丹麦克朗', '挪威克朗', '日元', '加拿大元', '澳大利亚元', '新西兰元', '韩国元'}
:type symbol: str
:param date: trade date
:type date: str
:return: historical Bank of China RMB exchange rate quotes
:rtype: pandas.DataFrame
"""
data_dict = _currency_boc_sina_map(date=date)  # pass the caller's date through rather than a hard-coded one
url = "http://biz.finance.sina.com.cn/forex/forex.php"
params = {
"money_code": data_dict[symbol],
"type": "0",
"startdate": "2012-01-01",
"enddate": "-".join([date[:4], date[4:6], date[6:]]),
"page": "1",
"call_type": "ajax",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
soup.find(attrs={"id": "money_code"})
page_num = int(soup.find_all("a", attrs={"class": "page"})[-2].text)
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text, header=0)[0]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"日期",
"中行汇买价",
"中行钞买价",
"中行钞卖价/汇卖价",
"央行中间价",
]
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
big_df["中行汇买价"] = pd.to_numeric(big_df["中行汇买价"])
big_df["中行钞买价"] = pd.to_numeric(big_df["中行钞买价"])
big_df["中行钞卖价/汇卖价"] = pd.to_numeric(big_df["中行钞卖价/汇卖价"])
big_df["央行中间价"] = pd.to_numeric(big_df["央行中间价"])
big_df.sort_values(["日期"], inplace=True)
big_df.reset_index(inplace=True, drop=True)
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency_china_bank_sina.py#L51-L99
| 25 |
[
0
] | 2.040816 |
[
13,
14,
15,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
40,
41,
42,
43,
44,
46,
47,
48
] | 44.897959 | false | 20.512821 | 49 | 2 | 55.102041 | 8 |
def currency_boc_sina(
symbol: str = "美元", date: str = "20210614"
) -> pd.DataFrame:
data_dict = _currency_boc_sina_map(date=date)  # pass the caller's date through rather than a hard-coded one
url = "http://biz.finance.sina.com.cn/forex/forex.php"
params = {
"money_code": data_dict[symbol],
"type": "0",
"startdate": "2012-01-01",
"enddate": "-".join([date[:4], date[4:6], date[6:]]),
"page": "1",
"call_type": "ajax",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
soup.find(attrs={"id": "money_code"})
page_num = int(soup.find_all("a", attrs={"class": "page"})[-2].text)
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params.update({"page": page})
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text, header=0)[0]
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"日期",
"中行汇买价",
"中行钞买价",
"中行钞卖价/汇卖价",
"央行中间价",
]
big_df["日期"] = pd.to_datetime(big_df["日期"]).dt.date
big_df["中行汇买价"] = pd.to_numeric(big_df["中行汇买价"])
big_df["中行钞买价"] = pd.to_numeric(big_df["中行钞买价"])
big_df["中行钞卖价/汇卖价"] = pd.to_numeric(big_df["中行钞卖价/汇卖价"])
big_df["央行中间价"] = pd.to_numeric(big_df["央行中间价"])
big_df.sort_values(["日期"], inplace=True)
big_df.reset_index(inplace=True, drop=True)
return big_df
| 18,601 |
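A usage sketch for currency_boc_sina, assuming network access to sina.com.cn; the symbol must be one of the Chinese currency names listed in the docstring:

import akshare as ak

rate_df = ak.currency_boc_sina(symbol="美元", date="20210614")
# columns: 日期, 中行汇买价, 中行钞买价, 中行钞卖价/汇卖价, 央行中间价
print(rate_df.tail())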
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency.py
|
currency_history
|
(
base: str = "USD", date: str = "2020-02-03", api_key: str = ""
)
|
return temp_df
|
Historical rates from currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param date: Specific date, e.g., "2020-02-03"
:type date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: rates of the base currency on the given date
:rtype: pandas.DataFrame
|
Historical rates from currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param date: Specific date, e.g., "2020-02-03"
:type date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: rates of the base currency on the given date
:rtype: pandas.DataFrame
| 32 | 52 |
def currency_history(
base: str = "USD", date: str = "2020-02-03", api_key: str = ""
) -> pd.DataFrame:
"""
Historical rates from currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param date: Specific date, e.g., "2020-02-03"
:type date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: rates of the base currency on the given date
:rtype: pandas.DataFrame
"""
payload = {"base": base, "date": date, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/historical"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency.py#L32-L52
| 25 |
[
0
] | 4.761905 |
[
15,
16,
17,
18,
19,
20
] | 28.571429 | false | 19.565217 | 21 | 1 | 71.428571 | 10 |
def currency_history(
base: str = "USD", date: str = "2020-02-03", api_key: str = ""
) -> pd.DataFrame:
payload = {"base": base, "date": date, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/historical"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df
| 18,602 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency.py
|
currency_time_series
|
(
base: str = "USD",
start_date: str = "2020-02-03",
end_date: str = "2020-03-04",
api_key: str = "",
)
|
return temp_df
|
Time-series data from currencyscoop.com
N.B. requires a plan with time-series access
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param start_date: Specific date, e.g., "2020-02-03"
:type start_date: str
:param end_date: Specific date, e.g., "2020-03-04"
:type end_date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: time-series rates of the base currency
:rtype: pandas.DataFrame
|
Time-series data from currencyscoop.com
N.B. requires a plan with time-series access
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param start_date: Specific date, e.g., "2020-02-03"
:type start_date: str
:param end_date: Specific date, e.g., "2020-03-04"
:type end_date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: time-series rates of the base currency
:rtype: pandas.DataFrame
| 55 | 86 |
def currency_time_series(
base: str = "USD",
start_date: str = "2020-02-03",
end_date: str = "2020-03-04",
api_key: str = "",
) -> pd.DataFrame:
"""
Time-series data from currencyscoop.com
N.B. requires a plan with time-series access
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param start_date: Specific date, e.g., "2020-02-03"
:type start_date: str
:param end_date: Specific date, e.g., "2020-03-04"
:type end_date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: time-series rates of the base currency
:rtype: pandas.DataFrame
"""
payload = {
"base": base,
"api_key": api_key,
"start_date": start_date,
"end_date": end_date,
}
url = "https://api.currencyscoop.com/v1/timeseries"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency.py#L55-L86
| 25 |
[
0
] | 3.125 |
[
21,
27,
28,
29,
30,
31
] | 18.75 | false | 19.565217 | 32 | 1 | 81.25 | 13 |
def currency_time_series(
base: str = "USD",
start_date: str = "2020-02-03",
end_date: str = "2020-03-04",
api_key: str = "",
) -> pd.DataFrame:
payload = {
"base": base,
"api_key": api_key,
"start_date": start_date,
"end_date": end_date,
}
url = "https://api.currencyscoop.com/v1/timeseries"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df
| 18,603 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency.py
|
currency_currencies
|
(
c_type: str = "fiat", api_key: str = ""
)
|
return temp_df
|
Supported-currencies metadata from currencyscoop.com
https://currencyscoop.com/api-documentation
:param c_type: currently only "fiat" returns data
:type c_type: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: metadata for all supported fiat currencies
:rtype: pandas.DataFrame
|
Supported-currencies metadata from currencyscoop.com
https://currencyscoop.com/api-documentation
:param c_type: currently only "fiat" returns data
:type c_type: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: metadata for all supported fiat currencies
:rtype: pandas.DataFrame
| 89 | 106 |
def currency_currencies(
c_type: str = "fiat", api_key: str = ""
) -> pd.DataFrame:
"""
Supported-currencies metadata from currencyscoop.com
https://currencyscoop.com/api-documentation
:param c_type: currently only "fiat" returns data
:type c_type: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: metadata for all supported fiat currencies
:rtype: pandas.DataFrame
"""
payload = {"type": c_type, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/currencies"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"]["fiats"]).T
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency.py#L89-L106
| 25 |
[
0
] | 5.555556 |
[
13,
14,
15,
16,
17
] | 27.777778 | false | 19.565217 | 18 | 1 | 72.222222 | 8 |
def currency_currencies(
c_type: str = "fiat", api_key: str = ""
) -> pd.DataFrame:
payload = {"type": c_type, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/currencies"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"]["fiats"]).T
return temp_df
| 18,604 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/currency/currency.py
|
currency_convert
|
(
base: str = "USD",
to: str = "CNY",
amount: str = "10000",
api_key: str = "",
)
|
return temp_se
|
Currency conversion via currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param to: The target currency to convert into
:type to: str
:param amount: The amount of base currency
:type amount: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: conversion result, including the rate and the converted amount
:rtype: pandas.Series
|
Currency conversion via currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param to: The target currency to convert into
:type to: str
:param amount: The amount of base currency
:type amount: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: conversion result, including the rate and the converted amount
:rtype: pandas.Series
| 109 | 140 |
def currency_convert(
base: str = "USD",
to: str = "CNY",
amount: str = "10000",
api_key: str = "",
) -> pd.Series:
"""
Currency conversion via currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param to: The target currency to convert into
:type to: str
:param amount: The amount of base currency
:type amount: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: conversion result, including the rate and the converted amount
:rtype: pandas.Series
"""
payload = {
"from": base,
"to": to,
"amount": amount,
"api_key": api_key,
}
url = "https://api.currencyscoop.com/v1/convert"
r = requests.get(url, params=payload)
temp_se = pd.Series(r.json()["response"])
temp_se["timestamp"] = pd.to_datetime(temp_se["timestamp"], unit="s")
return temp_se
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/currency/currency.py#L109-L140
| 25 |
[
0
] | 3.125 |
[
20,
26,
27,
28,
29,
31
] | 18.75 | false | 19.565217 | 32 | 1 | 81.25 | 12 |
def currency_convert(
base: str = "USD",
to: str = "CNY",
amount: str = "10000",
api_key: str = "",
) -> pd.Series:
payload = {
"from": base,
"to": to,
"amount": amount,
"api_key": api_key,
}
url = "https://api.currencyscoop.com/v1/convert"
r = requests.get(url, params=payload)
temp_se = pd.Series(r.json()["response"])
temp_se["timestamp"] = pd.to_datetime(temp_se["timestamp"], unit="s")
return temp_se
| 18,605 |
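The four currencyscoop wrappers above share one calling convention: rate parameters plus an api_key. A hedged sketch of two of the calls; the key below is a placeholder, and a registered currencyscoop account is assumed:

import akshare as ak

API_KEY = "your-currencyscoop-api-key"  # placeholder, not a real key

hist_df = ak.currency_history(base="USD", date="2020-02-03", api_key=API_KEY)
conv_se = ak.currency_convert(base="USD", to="CNY", amount="10000", api_key=API_KEY)
print(hist_df.head())
print(conv_se)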
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/bank/bank_cbirc_2020.py
|
bank_fjcf_total_num
|
(item: str = "分局本级") -> int:
|
return int(res.json()["data"]["total"])
|
Home - Government Information - Administrative Penalties - CBIRC local offices: total number of penalty records
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of records
:rtype: int
|
Home - Government Information - Administrative Penalties - CBIRC local offices: total number of penalty records
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of records
:rtype: int
| 23 | 38 |
def bank_fjcf_total_num(item: str = "分局本级") -> int:
"""
Home - Government Information - Administrative Penalties - CBIRC local offices: total number of penalty records
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of records
:rtype: int
"""
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": "1",
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
return int(res.json()["data"]["total"])
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/bank/bank_cbirc_2020.py#L23-L38
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 43.75 |
[
7,
8,
9,
14,
15
] | 31.25 | false | 15.384615 | 16 | 1 | 68.75 | 4 |
def bank_fjcf_total_num(item: str = "分局本级") -> int:
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": "1",
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
return int(res.json()["data"]["total"])
| 18,606 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/bank/bank_cbirc_2020.py
|
bank_fjcf_total_page
|
(item: str = "分局本级", begin: int = 1) -> int:
|
return total_page
|
Get the total number of pages (18 records per page) for Home - Government Information - Administrative Penalties - CBIRC local offices
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of pages
:rtype: int
|
Get the total number of pages (18 records per page) for Home - Government Information - Administrative Penalties - CBIRC local offices
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of pages
:rtype: int
| 41 | 58 |
def bank_fjcf_total_page(item: str = "分局本级", begin: int = 1) -> int:
"""
Get the total number of pages (18 records per page) for Home - Government Information - Administrative Penalties - CBIRC local offices
http://www.cbirc.gov.cn/cn/view/pages/ItemList.html?itemPId=923&itemId=4115&itemUrl=ItemListRightList.html&itemName=%E9%93%B6%E4%BF%9D%E7%9B%91%E5%88%86%E5%B1%80%E6%9C%AC%E7%BA%A7&itemsubPId=931
:return: total number of pages
:rtype: int
"""
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": str(begin),
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
total = res.json()["data"]["total"]
total_page = total // 18 + (1 if total % 18 else 0)  # ceil(total / 18): one extra page for any partial final page
return total_page
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/bank/bank_cbirc_2020.py#L41-L58
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 38.888889 |
[
7,
8,
9,
14,
15,
16,
17
] | 38.888889 | false | 15.384615 | 18 | 2 | 61.111111 | 4 |
def bank_fjcf_total_page(item: str = "分局本级", begin: int = 1) -> int:
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": str(begin),
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
total = res.json()["data"]["total"]
total_page = total // 18 + (1 if total % 18 else 0)  # ceil(total / 18): one extra page for any partial final page
return total_page
| 18,607 |
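bank_fjcf_total_page computes ceil(total / 18) with 18 records per page. A quick check that the integer formulation agrees with math.ceil, including the exact-multiple edge case:

import math

for total in (0, 1, 17, 18, 19, 36, 37):
    assert total // 18 + (1 if total % 18 else 0) == math.ceil(total / 18)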
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/bank/bank_cbirc_2020.py
|
bank_fjcf_page_url
|
(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.D
|
return temp_df[["docId", "docSubtitle", "publishDate", "docFileUrl", "docTitle", "generaltype"]]
|
Get the per-page JSON data of Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: the required fields
:rtype: pandas.DataFrame
|
Get the per-page JSON data of Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: the required fields
:rtype: pandas.DataFrame
| 61 | 81 |
def bank_fjcf_page_url(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.DataFrame:
"""
Get the per-page JSON data of Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: the required fields
:rtype: pandas.DataFrame
"""
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
temp_df = pd.DataFrame()
for i_page in range(begin, page+begin):
print(i_page)
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": str(i_page),
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
temp_df = pd.concat([temp_df, pd.DataFrame(res.json()["data"]["rows"])])
return temp_df[["docId", "docSubtitle", "publishDate", "docFileUrl", "docTitle", "generaltype"]]
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/bank/bank_cbirc_2020.py#L61-L81
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 38.095238 |
[
8,
9,
10,
11,
12,
13,
18,
19,
20
] | 42.857143 | false | 15.384615 | 21 | 2 | 57.142857 | 5 |
def bank_fjcf_page_url(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.DataFrame:
cbirc_headers = cbirc_headers_without_cookie_2020.copy()
main_url = "http://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild"
temp_df = pd.DataFrame()
for i_page in range(begin, page+begin):
print(i_page)
params = {
"itemId": item_id_list[item],
"pageSize": "18",
"pageIndex": str(i_page),
}
res = requests.get(main_url, params=params, headers=cbirc_headers)
temp_df = pd.concat([temp_df, pd.DataFrame(res.json()["data"]["rows"])])
return temp_df[["docId", "docSubtitle", "publishDate", "docFileUrl", "docTitle", "generaltype"]]
| 18,608 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/bank/bank_cbirc_2020.py
|
bank_fjcf_table_detail
|
(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.D
|
return big_df
|
Get the data of each XXXX administrative penalty disclosure table under Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: all penalty disclosure tables concatenated, ordered from the first fetched page to the last
:rtype: pandas.DataFrame
|
Get the data of each XXXX administrative penalty disclosure table under Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: all penalty disclosure tables concatenated, ordered from the first fetched page to the last
:rtype: pandas.DataFrame
| 84 | 142 |
def bank_fjcf_table_detail(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.DataFrame:
"""
Get the data of each XXXX administrative penalty disclosure table under Home - Government Information - Administrative Penalties - CBIRC local offices
:param page: number of pages to fetch, counted from begin; get the total page count via bank_fjcf_total_page()
:type page: int
:return: all penalty disclosure tables concatenated, ordered from the first fetched page to the last
:rtype: pandas.DataFrame
"""
id_list = bank_fjcf_page_url(page=page, item=item, begin=begin)["docId"]
big_df = pd.DataFrame()
for item in id_list:
print(item)
url = f"http://www.cbirc.gov.cn/cn/static/data/DocInfo/SelectByDocId/data_docId={item}.json"
res = requests.get(url)
# print(res.json()["data"]["docClob"])
try:
table_list = pd.read_html(res.json()["data"]["docClob"])[0]
table_list = table_list.iloc[:, 3:].values.tolist()
# some older tables lack fields, so pad the missing ones
if len(table_list) == 7:
table_list.insert(2, pd.NA)
table_list.insert(3, pd.NA)
table_list.insert(4, pd.NA)
elif len(table_list) == 8:
table_list.insert(1, pd.NA)
table_list.insert(2, pd.NA)
elif len(table_list) == 9:
table_list.insert(2, pd.NA)
elif len(table_list) == 11:
table_list = table_list[2:]
table_list.insert(2, pd.NA)
# some rows come back as nested lists; flatten them here
table_list = [item[0] if isinstance(
item, list) else item for item in table_list]
table_list.append(str(item))
table_list.append(res.json()["data"]["publishDate"])
table_df = pd.DataFrame(table_list)
table_df.columns = ["内容"]
big_df = pd.concat([big_df, table_df.T], ignore_index=True)
# some pages lack the table entirely, so everything above is kept inside the try block
except Exception:
print(f"{item} is not a table, it will be skipped")
continue
big_df.columns = [
"行政处罚决定书文号",
"姓名",
"单位", # 20200108新增
"单位名称",
"主要负责人姓名",
"主要违法违规事实(案由)",
"行政处罚依据",
"行政处罚决定",
"作出处罚决定的机关名称",
"作出处罚决定的日期",
"处罚ID",
"处罚公布日期",
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/bank/bank_cbirc_2020.py#L84-L142
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 13.559322 |
[
8,
9,
10,
11,
12,
13,
15,
16,
17,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
33,
35,
36,
37,
38,
39,
41,
42,
43,
44,
58
] | 54.237288 | false | 15.384615 | 59 | 8 | 45.762712 | 5 |
def bank_fjcf_table_detail(page: int = 5, item: str = "分局本级", begin: int = 1) -> pd.DataFrame:
id_list = bank_fjcf_page_url(page=page, item=item, begin=begin)["docId"]
big_df = pd.DataFrame()
for item in id_list:
print(item)
url = f"http://www.cbirc.gov.cn/cn/static/data/DocInfo/SelectByDocId/data_docId={item}.json"
res = requests.get(url)
# print(res.json()["data"]["docClob"])
try:
table_list = pd.read_html(res.json()["data"]["docClob"])[0]
table_list = table_list.iloc[:, 3:].values.tolist()
# some older tables lack fields, so pad the missing ones
if len(table_list) == 7:
table_list.insert(2, pd.NA)
table_list.insert(3, pd.NA)
table_list.insert(4, pd.NA)
elif len(table_list) == 8:
table_list.insert(1, pd.NA)
table_list.insert(2, pd.NA)
elif len(table_list) == 9:
table_list.insert(2, pd.NA)
elif len(table_list) == 11:
table_list = table_list[2:]
table_list.insert(2, pd.NA)
# some rows come back as nested lists; flatten them here
table_list = [item[0] if isinstance(
item, list) else item for item in table_list]
table_list.append(str(item))
table_list.append(res.json()["data"]["publishDate"])
table_df = pd.DataFrame(table_list)
table_df.columns = ["内容"]
big_df = pd.concat([big_df, table_df.T], ignore_index=True)
# some pages lack the table entirely, so everything above is kept inside the try block
except Exception:
print(f"{item} is not a table, it will be skipped")
continue
big_df.columns = [
"行政处罚决定书文号",
"姓名",
"单位", # 20200108新增
"单位名称",
"主要负责人姓名",
"主要违法违规事实(案由)",
"行政处罚依据",
"行政处罚决定",
"作出处罚决定的机关名称",
"作出处罚决定的日期",
"处罚ID",
"处罚公布日期",
]
return big_df
| 18,609 |
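A usage sketch chaining the three bank_fjcf_* helpers; this assumes they are exported at the package level (otherwise import them from akshare.bank.bank_cbirc_2020), and page=1 keeps the request volume small:

import akshare as ak

total = ak.bank_fjcf_total_num(item="分局本级")   # number of penalty records
pages = ak.bank_fjcf_total_page(item="分局本级")  # ceil(total / 18)
detail_df = ak.bank_fjcf_table_detail(page=1, item="分局本级", begin=1)
print(total, pages, len(detail_df))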
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/pro/data_pro.py
|
pro_api
|
(token='')
|
Initialize the pro API. On first use you can store your token credential via ak.set_token('your token'); a temporary token can also be passed in through this parameter
|
Initialize the pro API. On first use you can store your token credential via ak.set_token('your token'); a temporary token can also be passed in through this parameter
| 11 | 21 |
def pro_api(token=''):
"""
Initialize the pro API. On first use you can store your token credential via ak.set_token('your token'); a temporary token can also be passed in through this parameter
"""
if token == '' or token is None:
token = token_process.get_token()
if token is not None and token != '':
pro = client.DataApi(token)
return pro
else:
raise Exception('api init error.')
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/pro/data_pro.py#L11-L21
| 25 |
[
0,
1,
2,
3
] | 36.363636 |
[
4,
5,
6,
7,
8,
10
] | 54.545455 | false | 31.25 | 11 | 5 | 45.454545 | 1 |
def pro_api(token=''):
if token == '' or token is None:
token = token_process.get_token()
if token is not None and token != '':
pro = client.DataApi(token)
return pro
else:
raise Exception('api init error.')
| 18,610 |
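A minimal sketch of the two ways to supply a token to pro_api; 'your token' is a placeholder credential:

import akshare as ak

# one-time setup: persist the token, after which pro_api() needs no argument
ak.set_token("your token")
pro = ak.pro_api()

# or pass a temporary token directly for this session only
pro_tmp = ak.pro_api(token="your token")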
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/pro/client.py
|
DataApi.__init__
|
(self, token, timeout=10)
|
Constructor
:param token: API token used for user authentication
:type token: str
:param timeout: request timeout in seconds
:type timeout: int
|
Constructor
:param token: API token used for user authentication
:type token: str
:param timeout: request timeout in seconds
:type timeout: int
| 19 | 28 |
def __init__(self, token, timeout=10):
"""
Constructor
:param token: API token used for user authentication
:type token: str
:param timeout: request timeout in seconds
:type timeout: int
"""
self.__token = token
self.__timeout = timeout
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/pro/client.py#L19-L28
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 80 |
[
8,
9
] | 20 | false | 32.432432 | 10 | 1 | 80 | 5 |
def __init__(self, token, timeout=10):
self.__token = token
self.__timeout = timeout
| 18,611 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/pro/client.py
|
DataApi.query
|
(self, api_name, fields="", **kwargs)
|
:param api_name: name of the endpoint to call
:type api_name: str
:param fields: the fields to retrieve
:type fields: str
:param kwargs: additional request parameters
:type kwargs: key-value pairs
:return: the requested data
:rtype: dict or pandas.DataFrame
|
:param api_name: name of the endpoint to call
:type api_name: str
:param fields: the fields to retrieve
:type fields: str
:param kwargs: additional request parameters
:type kwargs: key-value pairs
:return: the requested data
:rtype: dict or pandas.DataFrame
| 30 | 65 |
def query(self, api_name, fields="", **kwargs):
"""
:param api_name: name of the endpoint to call
:type api_name: str
:param fields: the fields to retrieve
:type fields: str
:param kwargs: additional request parameters
:type kwargs: key-value pairs
:return: the requested data
:rtype: dict or pandas.DataFrame
"""
headers = {
"X-Token": self.__token,
}
url = parse.urljoin(self.__http_url, "/".join([api_name, *kwargs.values()]))
res = requests.get(url, headers=headers, timeout=self.__timeout)
if res.status_code != 200:
raise Exception("连接异常, 请检查您的Token是否过期和输入的参数是否正确")
data_json = res.json()
if fields == "":
try:
return pd.DataFrame(data_json)
except ValueError:  # a dict of scalars cannot build a DataFrame directly; fall back to an index-oriented frame
result_df = pd.DataFrame.from_dict(data_json, orient="index", columns=[api_name])
return result_df
else:  # extra handling added here
if api_name == "variety_all_positions":
big_df = pd.DataFrame()
for item in data_json[fields].keys():
temp_df = pd.DataFrame(data_json[fields][item])
temp_df["code"] = item
big_df = pd.concat([big_df, temp_df], ignore_index=True)  # DataFrame.append was removed in pandas 2.x; use pd.concat
big_df.reset_index(inplace=True, drop=True)
return big_df
else:
return pd.DataFrame(data_json[fields])
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/pro/client.py#L30-L65
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
] | 30.555556 |
[
11,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
26,
27,
28,
29,
30,
31,
32,
33,
35
] | 58.333333 | false | 32.432432 | 36 | 6 | 41.666667 | 8 |
def query(self, api_name, fields="", **kwargs):
headers = {
"X-Token": self.__token,
}
url = parse.urljoin(self.__http_url, "/".join([api_name, *kwargs.values()]))
res = requests.get(url, headers=headers, timeout=self.__timeout)
if res.status_code != 200:
raise Exception("连接异常, 请检查您的Token是否过期和输入的参数是否正确")
data_json = res.json()
if fields == "":
try:
return pd.DataFrame(data_json)
except ValueError:  # a dict of scalars cannot build a DataFrame directly; fall back to an index-oriented frame
result_df = pd.DataFrame.from_dict(data_json, orient="index", columns=[api_name])
return result_df
else:  # extra handling added here
if api_name == "variety_all_positions":
big_df = pd.DataFrame()
for item in data_json[fields].keys():
temp_df = pd.DataFrame(data_json[fields][item])
temp_df["code"] = item
big_df = pd.concat([big_df, temp_df], ignore_index=True)  # DataFrame.append was removed in pandas 2.x; use pd.concat
big_df.reset_index(inplace=True, drop=True)
return big_df
else:
return pd.DataFrame(data_json[fields])
| 18,612 |
|
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/pro/client.py
|
DataApi.__getattr__
|
(self, name)
|
return partial(self.query, name)
| 67 | 68 |
def __getattr__(self, name):
return partial(self.query, name)
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/pro/client.py#L67-L68
| 25 |
[
0
] | 50 |
[
1
] | 50 | false | 32.432432 | 2 | 1 | 50 | 0 |
def __getattr__(self, name):
return partial(self.query, name)
| 18,613 |
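__getattr__ combined with functools.partial is what turns pro.some_endpoint(...) into pro.query('some_endpoint', ...): attribute lookup only falls through to __getattr__ for names the instance does not define, and each such name is bound as the first argument of query. A standalone sketch of the pattern:

from functools import partial

class Api:
    def query(self, api_name, **kwargs):
        return f"GET /{api_name} with {kwargs}"

    def __getattr__(self, name):
        # only called when normal attribute lookup fails,
        # so real attributes and methods are unaffected
        return partial(self.query, name)

api = Api()
print(api.stock_daily(symbol="000001"))  # GET /stock_daily with {'symbol': '000001'}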
||
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/risk_rv.py
|
article_oman_rv
|
(symbol: str = "FTSE", index: str = "rk_th2")
|
return temp_df
|
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str ['AEX', 'AORD', 'BFX', 'BSESN', 'BVLG', 'BVSP', 'DJI', 'FCHI', 'FTMIB', 'FTSE', 'GDAXI', 'GSPTSE', 'HSI', 'IBEX', 'IXIC', 'KS11', 'KSE', 'MXX', 'N225', 'NSEI', 'OMXC20', 'OMXHPI', 'OMXSPI', 'OSEAX', 'RUT', 'SMSI', 'SPX', 'SSEC', 'SSMI', 'STI', 'STOXX50E']
:param index: str, the realized measure, one of ['medrv', 'rk_twoscale', 'bv', 'rv10', 'rv5', 'rk_th2', 'rv10_ss', 'rsv', 'rv5_ss', 'bv_ss', 'rk_parzen', 'rsv_ss']
:return: pandas.DataFrame
The Oxford-Man Institute's "realised library" contains daily non-parametric measures of how volatile financial assets or indexes were in the past. Each day's volatility measure depends solely on financial data from that day. They are driven by the use of the latest innovations in econometric modelling and theory to design them, while we draw our high frequency data from the Thomson Reuters DataScope Tick History database. Realised measures are not volatility forecasts. However, some researchers use these measures as an input into forecasting models. The aim of this line of research is to make financial markets more transparent by exposing how volatility changes through time.
This Library is used as the basis of some of our own research, which affects its scope, and is made available here to encourage the more widespread exploitation of these methods. It is given 'as is' and solely for informational purposes; please read the disclaimer.
The volatility data can be visually explored. We make the complete up-to-date dataset available for download. Lists of assets covered and realized measures available are also available.
| Symbol | Name | Earliest Available | Latest Available |
|-----------|-------------------------------------------|--------------------|-------------------|
| .AEX | AEX index | January 03, 2000 | November 28, 2019 |
| .AORD | All Ordinaries | January 04, 2000 | November 28, 2019 |
| .BFX | Bell 20 Index | January 03, 2000 | November 28, 2019 |
| .BSESN | S&P BSE Sensex | January 03, 2000 | November 28, 2019 |
| .BVLG | PSI All-Share Index | October 15, 2012 | November 28, 2019 |
| .BVSP | BVSP BOVESPA Index | January 03, 2000 | November 28, 2019 |
| .DJI | Dow Jones Industrial Average | January 03, 2000 | November 27, 2019 |
| .FCHI | CAC 40 | January 03, 2000 | November 28, 2019 |
| .FTMIB | FTSE MIB | June 01, 2009 | November 28, 2019 |
| .FTSE | FTSE 100 | January 04, 2000 | November 28, 2019 |
| .GDAXI | DAX | January 03, 2000 | November 28, 2019 |
| .GSPTSE | S&P/TSX Composite index | May 02, 2002 | November 28, 2019 |
| .HSI | HANG SENG Index | January 03, 2000 | November 28, 2019 |
| .IBEX | IBEX 35 Index | January 03, 2000 | November 28, 2019 |
| .IXIC | Nasdaq 100 | January 03, 2000 | November 27, 2019 |
| .KS11 | Korea Composite Stock Price Index (KOSPI) | January 04, 2000 | November 28, 2019 |
| .KSE | Karachi SE 100 Index | January 03, 2000 | November 28, 2019 |
| .MXX | IPC Mexico | January 03, 2000 | November 28, 2019 |
| .N225 | Nikkei 225 | February 02, 2000 | November 28, 2019 |
| .NSEI | NIFTY 50 | January 03, 2000 | November 28, 2019 |
| .OMXC20 | OMX Copenhagen 20 Index | October 03, 2005 | November 28, 2019 |
| .OMXHPI | OMX Helsinki All Share Index | October 03, 2005 | November 28, 2019 |
| .OMXSPI | OMX Stockholm All Share Index | October 03, 2005 | November 28, 2019 |
| .OSEAX | Oslo Exchange All-share Index | September 03, 2001 | November 28, 2019 |
| .RUT | Russel 2000 | January 03, 2000 | November 27, 2019 |
| .SMSI | Madrid General Index | July 04, 2005 | November 28, 2019 |
| .SPX | S&P 500 Index | January 03, 2000 | November 27, 2019 |
| .SSEC | Shanghai Composite Index | January 04, 2000 | November 28, 2019 |
| .SSMI | Swiss Stock Market Index | January 04, 2000 | November 28, 2019 |
| .STI | Straits Times Index | January 03, 2000 | November 28, 2019 |
| .STOXX50E | EURO STOXX 50 | January 03, 2000 | November 28, 2019 |
|
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str ['AEX', 'AORD', 'BFX', 'BSESN', 'BVLG', 'BVSP', 'DJI', 'FCHI', 'FTMIB', 'FTSE', 'GDAXI', 'GSPTSE', 'HSI', 'IBEX', 'IXIC', 'KS11', 'KSE', 'MXX', 'N225', 'NSEI', 'OMXC20', 'OMXHPI', 'OMXSPI', 'OSEAX', 'RUT', 'SMSI', 'SPX', 'SSEC', 'SSMI', 'STI', 'STOXX50E']
:param index: str, the realized measure, one of ['medrv', 'rk_twoscale', 'bv', 'rv10', 'rv5', 'rk_th2', 'rv10_ss', 'rsv', 'rv5_ss', 'bv_ss', 'rk_parzen', 'rsv_ss']
:return: pandas.DataFrame
| 17 | 77 |
def article_oman_rv(symbol: str = "FTSE", index: str = "rk_th2") -> pd.DataFrame:
"""
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str ['AEX', 'AORD', 'BFX', 'BSESN', 'BVLG', 'BVSP', 'DJI', 'FCHI', 'FTMIB', 'FTSE', 'GDAXI', 'GSPTSE', 'HSI', 'IBEX', 'IXIC', 'KS11', 'KSE', 'MXX', 'N225', 'NSEI', 'OMXC20', 'OMXHPI', 'OMXSPI', 'OSEAX', 'RUT', 'SMSI', 'SPX', 'SSEC', 'SSMI', 'STI', 'STOXX50E']
:param index: str, the realized measure, one of ['medrv', 'rk_twoscale', 'bv', 'rv10', 'rv5', 'rk_th2', 'rv10_ss', 'rsv', 'rv5_ss', 'bv_ss', 'rk_parzen', 'rsv_ss']
:return: pandas.DataFrame
The Oxford-Man Institute's "realised library" contains daily non-parametric measures of how volatile financial assets or indexes were in the past. Each day's volatility measure depends solely on financial data from that day. They are driven by the use of the latest innovations in econometric modelling and theory to design them, while we draw our high frequency data from the Thomson Reuters DataScope Tick History database. Realised measures are not volatility forecasts. However, some researchers use these measures as an input into forecasting models. The aim of this line of research is to make financial markets more transparent by exposing how volatility changes through time.
This Library is used as the basis of some of our own research, which affects its scope, and is made available here to encourage the more widespread exploitation of these methods. It is given 'as is' and solely for informational purposes; please read the disclaimer.
The volatility data can be visually explored. We make the complete up-to-date dataset available for download. Lists of assets covered and realized measures available are also available.
| Symbol | Name | Earliest Available | Latest Available |
|-----------|-------------------------------------------|--------------------|-------------------|
| .AEX | AEX index | January 03, 2000 | November 28, 2019 |
| .AORD | All Ordinaries | January 04, 2000 | November 28, 2019 |
| .BFX | Bell 20 Index | January 03, 2000 | November 28, 2019 |
| .BSESN | S&P BSE Sensex | January 03, 2000 | November 28, 2019 |
| .BVLG | PSI All-Share Index | October 15, 2012 | November 28, 2019 |
| .BVSP | BVSP BOVESPA Index | January 03, 2000 | November 28, 2019 |
| .DJI | Dow Jones Industrial Average | January 03, 2000 | November 27, 2019 |
| .FCHI | CAC 40 | January 03, 2000 | November 28, 2019 |
| .FTMIB | FTSE MIB | June 01, 2009 | November 28, 2019 |
| .FTSE | FTSE 100 | January 04, 2000 | November 28, 2019 |
| .GDAXI | DAX | January 03, 2000 | November 28, 2019 |
| .GSPTSE | S&P/TSX Composite index | May 02, 2002 | November 28, 2019 |
| .HSI | HANG SENG Index | January 03, 2000 | November 28, 2019 |
| .IBEX | IBEX 35 Index | January 03, 2000 | November 28, 2019 |
| .IXIC | Nasdaq 100 | January 03, 2000 | November 27, 2019 |
| .KS11 | Korea Composite Stock Price Index (KOSPI) | January 04, 2000 | November 28, 2019 |
| .KSE | Karachi SE 100 Index | January 03, 2000 | November 28, 2019 |
| .MXX | IPC Mexico | January 03, 2000 | November 28, 2019 |
| .N225 | Nikkei 225 | February 02, 2000 | November 28, 2019 |
| .NSEI | NIFTY 50 | January 03, 2000 | November 28, 2019 |
| .OMXC20 | OMX Copenhagen 20 Index | October 03, 2005 | November 28, 2019 |
| .OMXHPI | OMX Helsinki All Share Index | October 03, 2005 | November 28, 2019 |
| .OMXSPI | OMX Stockholm All Share Index | October 03, 2005 | November 28, 2019 |
| .OSEAX | Oslo Exchange All-share Index | September 03, 2001 | November 28, 2019 |
| .RUT | Russel 2000 | January 03, 2000 | November 27, 2019 |
| .SMSI | Madrid General Index | July 04, 2005 | November 28, 2019 |
| .SPX | S&P 500 Index | January 03, 2000 | November 27, 2019 |
| .SSEC | Shanghai Composite Index | January 04, 2000 | November 28, 2019 |
| .SSMI | Swiss Stock Market Index | January 04, 2000 | November 28, 2019 |
| .STI | Straits Times Index | January 03, 2000 | November 28, 2019 |
| .STOXX50E | EURO STOXX 50 | January 03, 2000 | November 28, 2019 |
"""
url = "https://realized.oxford-man.ox.ac.uk/theme/js/visualization-data.js?20191111113154"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
soup_text = soup.find("p").get_text()
data_json = json.loads(soup_text[soup_text.find("{") : soup_text.rfind("};") + 1])
date_list = data_json[f".{symbol}"]["dates"]
title_fore = data_json[f".{symbol}"][index]["name"]
title_last = data_json[f".{symbol}"][index]["measure"]
title_list = title_fore + "-" + title_last
temp_df = pd.DataFrame([date_list, data_json[f".{symbol}"][index]["data"]]).T
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0], unit="ms")
temp_df = temp_df.iloc[:, 1]
temp_df.index.name = "date"
temp_df.name = f"{symbol}-{index}"
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/risk_rv.py#L17-L77
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45
] | 75.409836 |
[
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60
] | 24.590164 | false | 16.41791 | 61 | 1 | 75.409836 | 43 |
def article_oman_rv(symbol: str = "FTSE", index: str = "rk_th2") -> pd.DataFrame:
url = "https://realized.oxford-man.ox.ac.uk/theme/js/visualization-data.js?20191111113154"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
soup_text = soup.find("p").get_text()
data_json = json.loads(soup_text[soup_text.find("{") : soup_text.rfind("};") + 1])
date_list = data_json[f".{symbol}"]["dates"]
title_fore = data_json[f".{symbol}"][index]["name"]
title_last = data_json[f".{symbol}"][index]["measure"]
title_list = title_fore + "-" + title_last
temp_df = pd.DataFrame([date_list, data_json[f".{symbol}"][index]["data"]]).T
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0], unit="ms")
temp_df = temp_df.iloc[:, 1]
temp_df.index.name = "date"
temp_df.name = f"{symbol}-{index}"
return temp_df
| 18,614 |
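A usage sketch for the realized-volatility series; note the hard-coded visualization-data.js URL, so this only works while the upstream Oxford-Man site still serves that file:

import akshare as ak

rv_series = ak.article_oman_rv(symbol="FTSE", index="rk_th2")
# a pandas Series named "FTSE-rk_th2", indexed by date
print(rv_series.tail())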
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/risk_rv.py
|
article_oman_rv_short
|
(symbol: str = "FTSE")
|
return temp_df
|
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str FTSE: FTSE 100, GDAXI: DAX, RUT: Russel 2000, SPX: S&P 500 Index, STOXX50E: EURO STOXX 50, SSEC: Shanghai Composite Index, N225: Nikkei 225
:return: pandas.DataFrame
The Oxford-Man Institute's "realised library" contains daily non-parametric measures of how volatile financial assets or indexes were in the past. Each day's volatility measure depends solely on financial data from that day. They are driven by the use of the latest innovations in econometric modelling and theory to design them, while we draw our high frequency data from the Thomson Reuters DataScope Tick History database. Realised measures are not volatility forecasts. However, some researchers use these measures as an input into forecasting models. The aim of this line of research is to make financial markets more transparent by exposing how volatility changes through time.
This Library is used as the basis of some of our own research, which affects its scope, and is made available here to encourage the more widespread exploitation of these methods. It is given 'as is' and solely for informational purposes; please read the disclaimer.
The volatility data can be visually explored. We make the complete up-to-date dataset available for download. Lists of assets covered and realized measures available are also available.
|
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str FTSE: FTSE 100, GDAXI: DAX, RUT: Russel 2000, SPX: S&P 500 Index, STOXX50E: EURO STOXX 50, SSEC: Shanghai Composite Index, N225: Nikkei 225
:return: pandas.DataFrame
| 80 | 119 |
def article_oman_rv_short(symbol: str = "FTSE") -> pd.DataFrame:
"""
Data from the Oxford-Man Institute of Quantitative Finance Realized Library
:param symbol: str FTSE: FTSE 100, GDAXI: DAX, RUT: Russel 2000, SPX: S&P 500 Index, STOXX50E: EURO STOXX 50, SSEC: Shanghai Composite Index, N225: Nikkei 225
:return: pandas.DataFrame
The Oxford-Man Institute's "realised library" contains daily non-parametric measures of how volatile financial assets or indexes were in the past. Each day's volatility measure depends solely on financial data from that day. They are driven by the use of the latest innovations in econometric modelling and theory to design them, while we draw our high frequency data from the Thomson Reuters DataScope Tick History database. Realised measures are not volatility forecasts. However, some researchers use these measures as an input into forecasting models. The aim of this line of research is to make financial markets more transparent by exposing how volatility changes through time.
This Library is used as the basis of some of our own research, which affects its scope, and is made available here to encourage the more widespread exploitation of these methods. It is given 'as is' and solely for informational purposes; please read the disclaimer.
The volatility data can be visually explored. We make the complete up-to-date dataset available for download. Lists of assets covered and realized measures available are also available.
"""
url = "https://realized.oxford-man.ox.ac.uk/theme/js/front-page-chart.js"
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "realized.oxford-man.ox.ac.uk",
"Pragma": "no-cache",
"Referer": "https://realized.oxford-man.ox.ac.uk/?from=groupmessage&isappinstalled=0",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36",
}
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text, "lxml")
soup_text = soup.find("p").get_text()
data_json = json.loads(soup_text[soup_text.find("{") : soup_text.rfind("}") + 1])
title_fore = data_json[f".{symbol}"]["name"]
title_last = data_json[f".{symbol}"]["measure"]
title_list = title_fore + "-" + title_last
temp_df = pd.DataFrame(data_json[f".{symbol}"]["data"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0], unit="ms")
temp_df = temp_df.iloc[:, 1]
temp_df.index.name = "date"
temp_df.name = f"{symbol}"
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/risk_rv.py#L80-L119
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 30 |
[
12,
13,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39
] | 37.5 | false | 16.41791 | 40 | 1 | 62.5 | 9 |
def article_oman_rv_short(symbol: str = "FTSE") -> pd.DataFrame:
url = "https://realized.oxford-man.ox.ac.uk/theme/js/front-page-chart.js"
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "realized.oxford-man.ox.ac.uk",
"Pragma": "no-cache",
"Referer": "https://realized.oxford-man.ox.ac.uk/?from=groupmessage&isappinstalled=0",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36",
}
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text, "lxml")
soup_text = soup.find("p").get_text()
data_json = json.loads(soup_text[soup_text.find("{") : soup_text.rfind("}") + 1])
title_fore = data_json[f".{symbol}"]["name"]
title_last = data_json[f".{symbol}"]["measure"]
title_list = title_fore + "-" + title_last
temp_df = pd.DataFrame(data_json[f".{symbol}"]["data"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0], unit="ms")
temp_df = temp_df.iloc[:, 1]
temp_df.index.name = "date"
temp_df.name = f"{symbol}"
return temp_df
| 18,615 |
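Both Oxford-Man helpers recover a JSON object embedded in a JavaScript file by slicing from the first '{' to the closing '}'. A self-contained sketch of that extraction on a synthetic snippet:

import json

js_text = 'var chartData = {".FTSE": {"name": "FTSE 100", "data": [[946857600000, 0.1]]}};'
payload = js_text[js_text.find("{") : js_text.rfind("}") + 1]
data = json.loads(payload)
print(data[".FTSE"]["name"])  # FTSE 100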
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/risk_rv.py
|
article_rlab_rv
|
(symbol: str = "39693")
|
return temp_df
|
Dacheng Xiu's homepage - Risk Lab - Realized Volatility
:param symbol: str, the security code
:return: pandas.DataFrame
1996-01-02 0.000000
1996-01-04 0.000000
1996-01-05 0.000000
1996-01-09 0.000000
1996-01-10 0.000000
...
2019-11-04 0.175107
2019-11-05 0.185112
2019-11-06 0.210373
2019-11-07 0.240808
2019-11-08 0.199549
Name: RV, Length: 5810, dtype: float64
Website
https://dachxiu.chicagobooth.edu/
Objective
We provide up-to-date daily annualized realized volatilities for individual stocks, ETFs, and future contracts, which are estimated from high-frequency data. We are in the process of incorporating equities from global markets.
Data
We collect trades at their highest frequencies available (up to every millisecond for US equities after 2007), and clean them using the prevalent national best bid and offer (NBBO) that are available up to every second. The mid-quotes are calculated based on the NBBOs, so their highest sampling frequencies are also up to every second.
Methodology
We provide quasi-maximum likelihood estimates of volatility (QMLE) based on moving-average models MA(q), using non-zero returns of transaction prices (or mid-quotes if available) sampled up to their highest frequency available, for days with at least 12 observations. We select the best model (q) using Akaike Information Criterion (AIC). For comparison, we report realized volatility (RV) estimates using 5-minute and 15-minute subsampled returns.
References
1. “When Moving-Average Models Meet High-Frequency Data: Uniform Inference on Volatility”, by Rui Da and Dacheng Xiu. 2017.
2. “Quasi-Maximum Likelihood Estimation of Volatility with High Frequency Data”, by Dacheng Xiu. Journal of Econometrics, 159 (2010), 235-250.
3. “How Often to Sample A Continuous-time Process in the Presence of Market Microstructure Noise”, by Yacine Aït-Sahalia, Per Mykland, and Lan Zhang. Review of Financial Studies, 18 (2005), 351–416.
4. “The Distribution of Exchange Rate Volatility”, by Torben Andersen, Tim Bollerslev, Francis X. Diebold, and Paul Labys. Journal of the American Statistical Association, 96 (2001), 42-55.
5. “Econometric Analysis of Realized Volatility and Its Use in Estimating Stochastic Volatility Models”, by Ole E Barndorff‐Nielsen and Neil Shephard. Journal of the Royal Statistical Society: Series B, 64 (2002), 253-280.
|
Dacheng Xiu's homepage - Risk Lab - Realized Volatility
:param symbol: str, the security code
:return: pandas.DataFrame
1996-01-02 0.000000
1996-01-04 0.000000
1996-01-05 0.000000
1996-01-09 0.000000
1996-01-10 0.000000
...
2019-11-04 0.175107
2019-11-05 0.185112
2019-11-06 0.210373
2019-11-07 0.240808
2019-11-08 0.199549
Name: RV, Length: 5810, dtype: float64
| 122 | 185 |
def article_rlab_rv(symbol: str = "39693") -> pd.DataFrame:
"""
Dacheng Xiu's homepage - Risk Lab - Realized Volatility
:param symbol: str, the security code
:return: pandas.DataFrame
1996-01-02 0.000000
1996-01-04 0.000000
1996-01-05 0.000000
1996-01-09 0.000000
1996-01-10 0.000000
...
2019-11-04 0.175107
2019-11-05 0.185112
2019-11-06 0.210373
2019-11-07 0.240808
2019-11-08 0.199549
Name: RV, Length: 5810, dtype: float64
Website
https://dachxiu.chicagobooth.edu/
Objective
We provide up-to-date daily annualized realized volatilities for individual stocks, ETFs, and future contracts, which are estimated from high-frequency data. We are in the process of incorporating equities from global markets.
Data
We collect trades at their highest frequencies available (up to every millisecond for US equities after 2007), and clean them using the prevalent national best bid and offer (NBBO) that are available up to every second. The mid-quotes are calculated based on the NBBOs, so their highest sampling frequencies are also up to every second.
Methodology
We provide quasi-maximum likelihood estimates of volatility (QMLE) based on moving-average models MA(q), using non-zero returns of transaction prices (or mid-quotes if available) sampled up to their highest frequency available, for days with at least 12 observations. We select the best model (q) using Akaike Information Criterion (AIC). For comparison, we report realized volatility (RV) estimates using 5-minute and 15-minute subsampled returns.
References
1. “When Moving-Average Models Meet High-Frequency Data: Uniform Inference on Volatility”, by Rui Da and Dacheng Xiu. 2017.
2. “Quasi-Maximum Likelihood Estimation of Volatility with High Frequency Data”, by Dacheng Xiu. Journal of Econometrics, 159 (2010), 235-250.
3. “How Often to Sample A Continuous-time Process in the Presence of Market Microstructure Noise”, by Yacine Aït-Sahalia, Per Mykland, and Lan Zhang. Review of Financial Studies, 18 (2005), 351–416.
4. “The Distribution of Exchange Rate Volatility”, by Torben Andersen, Tim Bollerslev, Francis X. Diebold, and Paul Labys. Journal of the American Statistical Association, 96 (2001), 42-55.
5. “Econometric Analysis of Realized Volatility and Its Use in Estimating Stochastic Volatility Models”, by Ole E Barndorff‐Nielsen and Neil Shephard. Journal of the Royal Statistical Society: Series B, 64 (2002), 253-280.
"""
print("由于服务器在国外, 请稍后, 如果访问失败, 请使用代理工具")
url = "https://dachxiu.chicagobooth.edu/data.php"
payload = {"ticker": symbol}
res = requests.get(url, params=payload, verify=False)
soup = BeautifulSoup(res.text, "lxml")
title_fore = (
pd.DataFrame(soup.find("p").get_text().split(symbol)).iloc[0, 0].strip()
)
title_list = (
pd.DataFrame(soup.find("p").get_text().split(symbol))
.iloc[1, 0]
.strip()
.split("\n")
)
title_list.insert(0, title_fore)
temp_df = pd.DataFrame(soup.find("p").get_text().split(symbol)).iloc[2:, :]
temp_df = temp_df.iloc[:, 0].str.split(" ", expand=True)
temp_df = temp_df.iloc[:, 1:]
temp_df.iloc[:, -1] = temp_df.iloc[:, -1].str.replace(r"\n", "")
temp_df.reset_index(inplace=True)
temp_df.index = pd.to_datetime(temp_df.iloc[:, 1], format="%Y%m%d", errors="coerce")
temp_df = temp_df.iloc[:, 1:]
data_se = temp_df.iloc[:, 1]
data_se.name = "RV"
temp_df = data_se.astype("float", errors="ignore")
temp_df.index.name = "date"
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/risk_rv.py#L122-L185
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36
] | 57.8125 |
[
37,
38,
39,
40,
41,
42,
45,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63
] | 31.25 | false | 16.41791 | 64 | 1 | 68.75 | 34 |
def article_rlab_rv(symbol: str = "39693") -> pd.DataFrame:
print("由于服务器在国外, 请稍后, 如果访问失败, 请使用代理工具")
url = "https://dachxiu.chicagobooth.edu/data.php"
payload = {"ticker": symbol}
res = requests.get(url, params=payload, verify=False)
soup = BeautifulSoup(res.text, "lxml")
title_fore = (
pd.DataFrame(soup.find("p").get_text().split(symbol)).iloc[0, 0].strip()
)
title_list = (
pd.DataFrame(soup.find("p").get_text().split(symbol))
.iloc[1, 0]
.strip()
.split("\n")
)
title_list.insert(0, title_fore)
temp_df = pd.DataFrame(soup.find("p").get_text().split(symbol)).iloc[2:, :]
temp_df = temp_df.iloc[:, 0].str.split(" ", expand=True)
temp_df = temp_df.iloc[:, 1:]
temp_df.iloc[:, -1] = temp_df.iloc[:, -1].str.replace(r"\n", "")
temp_df.reset_index(inplace=True)
temp_df.index = pd.to_datetime(temp_df.iloc[:, 1], format="%Y%m%d", errors="coerce")
temp_df = temp_df.iloc[:, 1:]
data_se = temp_df.iloc[:, 1]
data_se.name = "RV"
temp_df = data_se.astype("float", errors="ignore")
temp_df.index.name = "date"
return temp_df
| 18,616 |
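A short usage sketch for this record's function (assuming akshare exposes it at the top level, as its module path suggests, and that the Chicago Booth server is reachable, possibly through a proxy):

import urllib3
import akshare as ak

# The function calls requests with verify=False, so silence the
# InsecureRequestWarning that urllib3 would otherwise emit.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

rv = ak.article_rlab_rv(symbol="39693")
print(rv.tail())  # annualized realized volatility, indexed by date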
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/epu_index.py
|
article_epu_index
|
(index: str = "China")
|
return epu_df
|
Economic Policy Uncertainty Index
http://www.policyuncertainty.com/index.html
:param index: the target country name, e.g. "China"
:type index: str
:return: data for the specified index
:rtype: pandas.DataFrame
|
Economic Policy Uncertainty Index
http://www.policyuncertainty.com/index.html
:param index: the target country name, e.g. "China"
:type index: str
:return: data for the specified index
:rtype: pandas.DataFrame
| 11 | 51 |
def article_epu_index(index: str = "China") -> pd.DataFrame:
"""
    Economic Policy Uncertainty Index
    http://www.policyuncertainty.com/index.html
    :param index: the target country name, e.g. "China"
    :type index: str
    :return: data for the specified index
:rtype: pandas.DataFrame
"""
if index == "China New":
index = "China"
if index == "USA":
index = "US"
if index == "Hong Kong":
index = "HK"
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/{index}_EPU_Data_Annotated.xlsx",
engine="openpyxl"
)
return epu_df
if index in ["Germany", "France", "Italy"]: # 欧洲
index = "Europe"
if index == "South Korea":
index = "Korea"
if index == "Spain New":
index = "Spain"
if index in ["Ireland", "Chile", "Colombia", "Netherlands", "Singapore", "Sweden"]:
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/{index}_Policy_Uncertainty_Data.xlsx",
engine="openpyxl"
)
return epu_df
if index == "Greece":
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/FKT_{index}_Policy_Uncertainty_Data.xlsx",
engine="openpyxl"
)
return epu_df
url = f"http://www.policyuncertainty.com/media/{index}_Policy_Uncertainty_Data.csv"
epu_df = pd.read_csv(url)
return epu_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/epu_index.py#L11-L51
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 21.95122 |
[
9,
10,
11,
12,
13,
14,
15,
19,
20,
21,
22,
23,
24,
25,
26,
27,
31,
32,
33,
37,
38,
39,
40
] | 56.097561 | false | 13.793103 | 41 | 9 | 43.902439 | 6 |
def article_epu_index(index: str = "China") -> pd.DataFrame:
if index == "China New":
index = "China"
if index == "USA":
index = "US"
if index == "Hong Kong":
index = "HK"
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/{index}_EPU_Data_Annotated.xlsx",
engine="openpyxl"
)
return epu_df
if index in ["Germany", "France", "Italy"]: # 欧洲
index = "Europe"
if index == "South Korea":
index = "Korea"
if index == "Spain New":
index = "Spain"
if index in ["Ireland", "Chile", "Colombia", "Netherlands", "Singapore", "Sweden"]:
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/{index}_Policy_Uncertainty_Data.xlsx",
engine="openpyxl"
)
return epu_df
if index == "Greece":
epu_df = pd.read_excel(
f"http://www.policyuncertainty.com/media/FKT_{index}_Policy_Uncertainty_Data.xlsx",
engine="openpyxl"
)
return epu_df
url = f"http://www.policyuncertainty.com/media/{index}_Policy_Uncertainty_Data.csv"
epu_df = pd.read_csv(url)
return epu_df
| 18,617 |
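The chain of if-statements above first normalizes country aliases and then picks one of three download patterns; a table-driven sketch of the same routing (the helper name epu_url is hypothetical, and the annotated-workbook branch is deliberately left out here):

ALIASES = {"China New": "China", "USA": "US", "Hong Kong": "HK",
           "Germany": "Europe", "France": "Europe", "Italy": "Europe",
           "South Korea": "Korea", "Spain New": "Spain"}
XLSX_ONLY = {"Ireland", "Chile", "Colombia", "Netherlands", "Singapore", "Sweden"}

def epu_url(index: str) -> str:
    index = ALIASES.get(index, index)
    base = "http://www.policyuncertainty.com/media"
    if index in XLSX_ONLY:
        return f"{base}/{index}_Policy_Uncertainty_Data.xlsx"
    if index == "Greece":
        return f"{base}/FKT_{index}_Policy_Uncertainty_Data.xlsx"
    return f"{base}/{index}_Policy_Uncertainty_Data.csv"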
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/fred_md.py
|
fred_md
|
(date: str = "2020-01")
|
return temp_df
|
The accompanying paper shows that factors extracted from the FRED-MD dataset share the same predictive content as those based on the various vintages of the so-called Stock-Watson data. In addition, it suggests that diffusion indexes constructed as the partial sum of the factor estimates can potentially be useful for the study of business cycle chronology.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Monthly Data
:rtype: pandas.DataFrame
|
The accompanying paper shows that factors extracted from the FRED-MD dataset share the same predictive content as those based on the various vintages of the so-called Stock-Watson data. In addition, it suggests that diffusion indexes constructed as the partial sum of the factor estimates can potentially be useful for the study of business cycle chronology.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Monthly Data
:rtype: pandas.DataFrame
| 12 | 22 |
def fred_md(date: str = "2020-01") -> pd.DataFrame:
"""
The accompanying paper shows that factors extracted from the FRED-MD dataset share the same predictive content as those based on the various vintages of the so-called Stock-Watson data. In addition, it suggests that diffusion indexes constructed as the partial sum of the factor estimates can potentially be useful for the study of business cycle chronology.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Monthly Data
:rtype: pandas.DataFrame
"""
url = f"https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/monthly/{date}.csv"
temp_df = pd.read_csv(url)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/fred_md.py#L12-L22
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 72.727273 |
[
8,
9,
10
] | 27.272727 | false | 33.333333 | 11 | 1 | 72.727273 | 5 |
def fred_md(date: str = "2020-01") -> pd.DataFrame:
url = f"https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/monthly/{date}.csv"
temp_df = pd.read_csv(url)
return temp_df
| 18,618 |
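In the FRED-MD vintage files the first row below the header carries the suggested transformation codes ("Transform:") rather than observations, so callers usually split it off; a hedged post-processing sketch (column layout as I understand the FRED-MD CSVs):

import pandas as pd

raw = pd.read_csv("https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/monthly/2020-01.csv")
transform_codes = raw.iloc[0, 1:]      # the "Transform:" row
data = raw.iloc[1:].copy()
data["sasdate"] = pd.to_datetime(data["sasdate"])
data = data.set_index("sasdate").astype(float)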
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/fred_md.py
|
fred_qd
|
(date: str = "2020-01")
|
return temp_df
|
FRED-QD is a quarterly frequency companion to FRED-MD. It is designed to emulate the dataset used in "Disentangling the Channels of the 2007-2009 Recession" by Stock and Watson (2012, NBER WP No. 18094) but also contains several additional series. Comments or suggestions are welcome.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Quarterly Data
:rtype: pandas.DataFrame
|
FRED-QD is a quarterly frequency companion to FRED-MD. It is designed to emulate the dataset used in "Disentangling the Channels of the 2007-2009 Recession" by Stock and Watson (2012, NBER WP No. 18094) but also contains several additional series. Comments or suggestions are welcome.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Quarterly Data
:rtype: pandas.DataFrame
| 25 | 35 |
def fred_qd(date: str = "2020-01") -> pd.DataFrame:
"""
FRED-QD is a quarterly frequency companion to FRED-MD. It is designed to emulate the dataset used in "Disentangling the Channels of the 2007-2009 Recession" by Stock and Watson (2012, NBER WP No. 18094) but also contains several additional series. Comments or suggestions are welcome.
:param date: e.g., "2020-03"; from "2015-01" to now
:type date: str
:return: Quarterly Data
:rtype: pandas.DataFrame
"""
url = f"https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/quarterly/{date}.csv"
temp_df = pd.read_csv(url)
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/fred_md.py#L25-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 72.727273 |
[
8,
9,
10
] | 27.272727 | false | 33.333333 | 11 | 1 | 72.727273 | 5 |
def fred_qd(date: str = "2020-01") -> pd.DataFrame:
url = f"https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/quarterly/{date}.csv"
temp_df = pd.read_csv(url)
return temp_df
| 18,619 |
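FRED-QD vintages differ from FRED-MD in carrying two metadata rows under the header, a factor-inclusion flag and the transformation code; a companion sketch (the row meanings are my reading of the FRED-QD notes, treat them as an assumption):

import pandas as pd

raw = pd.read_csv("https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/quarterly/2020-01.csv")
factor_flags = raw.iloc[0, 1:]     # 1 if the series enters factor estimation
transform_codes = raw.iloc[1, 1:]  # same coding scheme as FRED-MD
data = raw.iloc[2:].set_index("sasdate")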
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/article/ff_factor.py
|
article_ff_crr
|
()
|
return all_df
|
FF multi-factor model
:return: the FF multi-factor model as a single table
:rtype: pandas.DataFrame
|
FF multi-factor model
:return: the FF multi-factor model as a single table
:rtype: pandas.DataFrame
| 14 | 145 |
def article_ff_crr() -> pd.DataFrame:
"""
    FF multi-factor model
    :return: the FF multi-factor model as a single table
:rtype: pandas.DataFrame
"""
res = requests.get(ff_home_url)
# first table
list_index = (
pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :].index.tolist()
)
list_0 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][0]
.split(" ")
if item != ""
]
list_1 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][1]
.split(" ")
if item != ""
]
list_2 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][2]
.split(" ")
if item != ""
]
list_0.insert(0, "-")
list_1.insert(0, "-")
list_2.insert(0, "-")
temp_columns = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(" ", expand=True)
.T[0]
.dropna()
.tolist()
)
table_one = pd.DataFrame(
[list_0, list_1, list_2], index=list_index, columns=temp_columns
).T
# second table
list_index = (
pd.read_html(res.text, header=0, index_col=0)[4].iloc[1, :].index.tolist()
)
list_0 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][0]
.split(" ")
if item != ""
]
list_1 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][1]
.split(" ")
if item != ""
]
list_2 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][2]
.split(" ")
if item != ""
]
list_0.insert(0, "-")
list_1.insert(0, "-")
list_2.insert(0, "-")
temp_columns = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(" ", expand=True)
.T[1]
.dropna()
.tolist()
)
table_two = pd.DataFrame(
[list_0, list_1, list_2], index=list_index, columns=temp_columns
).T
# third table
df = pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :]
name_list = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(r" ", expand=True)
.iloc[2, :]
.tolist()
)
value_list_0 = df[0].split(" ")
value_list_0.insert(0, "-")
value_list_0.insert(1, "-")
value_list_0.insert(8, "-")
value_list_0.insert(15, "-")
value_list_1 = df[1].split(" ")
value_list_1.insert(0, "-")
value_list_1.insert(1, "-")
value_list_1.insert(8, "-")
value_list_1.insert(15, "-")
value_list_2 = df[2].split(" ")
value_list_2.insert(0, "-")
value_list_2.insert(1, "-")
value_list_2.insert(8, "-")
value_list_2.insert(15, "-")
name_list.remove("Small Growth Big Value")
name_list.insert(5, "Small Growth")
name_list.insert(6, "Big Value")
temp_list = [item for item in name_list if "Portfolios" not in item]
temp_list.insert(0, "Fama/French Research Portfolios")
temp_list.insert(1, "Size and Book-to-Market Portfolios")
temp_list.insert(8, "Size and Operating Profitability Portfolios")
temp_list.insert(15, "Size and Investment Portfolios")
temp_df = pd.DataFrame([temp_list, value_list_0, value_list_1, value_list_2]).T
temp_df.index = temp_df.iloc[:, 0]
temp_df = temp_df.iloc[:, 1:]
# concat
all_df = pd.DataFrame()
all_df = all_df.append(table_one)
all_df = all_df.append(table_two)
temp_df.columns = table_two.columns
all_df = all_df.append(temp_df)
return all_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/article/ff_factor.py#L14-L145
| 25 |
[
0,
1,
2,
3,
4,
5
] | 4.545455 |
[
6,
8,
11,
18,
25,
32,
33,
34,
35,
43,
48,
51,
58,
65,
72,
73,
74,
75,
83,
88,
89,
96,
97,
98,
99,
100,
102,
103,
104,
105,
106,
108,
109,
110,
111,
112,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
126,
127,
128,
129,
130,
131
] | 40.151515 | false | 9.836066 | 132 | 8 | 59.848485 | 3 |
def article_ff_crr() -> pd.DataFrame:
res = requests.get(ff_home_url)
# first table
list_index = (
pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :].index.tolist()
)
list_0 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][0]
.split(" ")
if item != ""
]
list_1 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][1]
.split(" ")
if item != ""
]
list_2 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[0, :][2]
.split(" ")
if item != ""
]
list_0.insert(0, "-")
list_1.insert(0, "-")
list_2.insert(0, "-")
temp_columns = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(" ", expand=True)
.T[0]
.dropna()
.tolist()
)
table_one = pd.DataFrame(
[list_0, list_1, list_2], index=list_index, columns=temp_columns
).T
# second table
list_index = (
pd.read_html(res.text, header=0, index_col=0)[4].iloc[1, :].index.tolist()
)
list_0 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][0]
.split(" ")
if item != ""
]
list_1 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][1]
.split(" ")
if item != ""
]
list_2 = [
item
for item in pd.read_html(res.text, header=0, index_col=0)[4]
.iloc[1, :][2]
.split(" ")
if item != ""
]
list_0.insert(0, "-")
list_1.insert(0, "-")
list_2.insert(0, "-")
temp_columns = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(" ", expand=True)
.T[1]
.dropna()
.tolist()
)
table_two = pd.DataFrame(
[list_0, list_1, list_2], index=list_index, columns=temp_columns
).T
# third table
df = pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :]
name_list = (
pd.read_html(res.text, header=0)[4]
.iloc[:, 0]
.str.split(r" ", expand=True)
.iloc[2, :]
.tolist()
)
value_list_0 = df[0].split(" ")
value_list_0.insert(0, "-")
value_list_0.insert(1, "-")
value_list_0.insert(8, "-")
value_list_0.insert(15, "-")
value_list_1 = df[1].split(" ")
value_list_1.insert(0, "-")
value_list_1.insert(1, "-")
value_list_1.insert(8, "-")
value_list_1.insert(15, "-")
value_list_2 = df[2].split(" ")
value_list_2.insert(0, "-")
value_list_2.insert(1, "-")
value_list_2.insert(8, "-")
value_list_2.insert(15, "-")
name_list.remove("Small Growth Big Value")
name_list.insert(5, "Small Growth")
name_list.insert(6, "Big Value")
temp_list = [item for item in name_list if "Portfolios" not in item]
temp_list.insert(0, "Fama/French Research Portfolios")
temp_list.insert(1, "Size and Book-to-Market Portfolios")
temp_list.insert(8, "Size and Operating Profitability Portfolios")
temp_list.insert(15, "Size and Investment Portfolios")
temp_df = pd.DataFrame([temp_list, value_list_0, value_list_1, value_list_2]).T
temp_df.index = temp_df.iloc[:, 0]
temp_df = temp_df.iloc[:, 1:]
# concat
all_df = pd.DataFrame()
all_df = all_df.append(table_one)
all_df = all_df.append(table_two)
temp_df.columns = table_two.columns
all_df = all_df.append(temp_df)
return all_df
| 18,620 |
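The concat block at the end of this function relies on DataFrame.append, which was deprecated in pandas 1.4 and removed in 2.0; on current pandas the same result comes from a single pd.concat (a drop-in sketch, given table_one, table_two and temp_df built as above):

temp_df.columns = table_two.columns
all_df = pd.concat([table_one, table_two, temp_df])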
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_ipo_declare.py
|
stock_ipo_declare
|
()
|
return temp_df
|
Eastmoney-Data Center-New Share Subscription-IPO Filing Information-IPO Filing Companies
https://data.eastmoney.com/xg/xg/sbqy.html
:return: IPO filing company information
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-New Share Subscription-IPO Filing Information-IPO Filing Companies
https://data.eastmoney.com/xg/xg/sbqy.html
:return: IPO filing company information
:rtype: pandas.DataFrame
| 14 | 69 |
def stock_ipo_declare() -> pd.DataFrame:
"""
    Eastmoney-Data Center-New Share Subscription-IPO Filing Information-IPO Filing Companies
    https://data.eastmoney.com/xg/xg/sbqy.html
    :return: IPO filing company information
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "1",
"sr": "-1",
"ps": "500",
"p": "1",
"type": "NS",
"sty": "NSFR",
"js": "({data:[(x)],pages:(pc)})",
"mkt": "1",
"fd": "2021-04-02",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"序号",
"会计师事务所",
"_",
"保荐机构",
"_",
"律师事务所",
"_",
"_",
"拟上市地",
"_",
"_",
"备注",
"申报企业",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"申报企业",
"拟上市地",
"保荐机构",
"会计师事务所",
"律师事务所",
"备注",
]
]
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_ipo_declare.py#L14-L69
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 12.5 |
[
7,
8,
19,
20,
21,
22,
23,
24,
25,
44,
55
] | 19.642857 | false | 31.578947 | 56 | 2 | 80.357143 | 4 |
def stock_ipo_declare() -> pd.DataFrame:
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "1",
"sr": "-1",
"ps": "500",
"p": "1",
"type": "NS",
"sty": "NSFR",
"js": "({data:[(x)],pages:(pc)})",
"mkt": "1",
"fd": "2021-04-02",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"序号",
"会计师事务所",
"_",
"保荐机构",
"_",
"律师事务所",
"_",
"_",
"拟上市地",
"_",
"_",
"备注",
"申报企业",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"申报企业",
"拟上市地",
"保荐机构",
"会计师事务所",
"律师事务所",
"备注",
]
]
return temp_df
| 18,621 |
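The js parameter makes the endpoint answer with a parenthesized JavaScript object literal whose keys are unquoted, which is not strict JSON; that is why the code strips one character from each end and hands the rest to demjson. A minimal sketch of that step (the sample payload is illustrative, not captured from the API):

from akshare.utils import demjson

data_text = '({data:["x,y,z"],pages:1})'      # shape of the response, illustrative
data_json = demjson.decode(data_text[1:-1])   # demjson tolerates unquoted keys
print(data_json["pages"])                     # -> 1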
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_register.py
|
stock_register_kcb
|
()
|
return big_df
|
Eastmoney-Data Center-New Share Data-Registration-based Review-STAR Market
http://data.eastmoney.com/kcb/?type=nsb
:return: STAR Market registration-based review results
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-New Share Data-Registration-based Review-STAR Market
http://data.eastmoney.com/kcb/?type=nsb
:return: STAR Market registration-based review results
:rtype: pandas.DataFrame
| 12 | 93 |
def stock_register_kcb() -> pd.DataFrame:
"""
    Eastmoney-Data Center-New Share Data-Registration-based Review-STAR Market
    http://data.eastmoney.com/kcb/?type=nsb
    :return: STAR Market registration-based review results
:rtype: pandas.DataFrame
"""
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df) + 1)
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date
big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_register.py#L12-L93
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.536585 |
[
7,
8,
19,
22,
23,
24,
25,
26,
27,
38,
41,
42,
43,
44,
45,
46,
47,
64,
79,
80,
81
] | 25.609756 | false | 9.459459 | 82 | 2 | 74.390244 | 4 |
def stock_register_kcb() -> pd.DataFrame:
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df) + 1)
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date
big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date
return big_df
| 18,622 |
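Only the page number changes between iterations, yet the loop above rebuilds params and headers on every pass; a behavior-preserving sketch of a tighter loop (pd.concat replaces DataFrame.append, which no longer exists in pandas 2.0):

big_df = pd.DataFrame()
for page in range(1, page_num + 1):
    params["p"] = page                      # everything else is constant
    r = requests.get(url, params=params, headers=headers)
    temp_df = pd.DataFrame(r.json()["result"]["data"])
    big_df = pd.concat([big_df, temp_df], ignore_index=True)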
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_register.py
|
stock_register_cyb
|
()
|
return big_df
|
Eastmoney-Data Center-New Share Data-Registration-based Review-ChiNext
http://data.eastmoney.com/xg/cyb/
:return: ChiNext registration-based review results
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-New Share Data-Registration-based Review-ChiNext
http://data.eastmoney.com/xg/cyb/
:return: ChiNext registration-based review results
:rtype: pandas.DataFrame
| 96 | 177 |
def stock_register_cyb() -> pd.DataFrame:
"""
    Eastmoney-Data Center-New Share Data-Registration-based Review-ChiNext
    http://data.eastmoney.com/xg/cyb/
    :return: ChiNext registration-based review results
:rtype: pandas.DataFrame
"""
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date
big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_register.py#L96-L177
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 8.536585 |
[
7,
8,
19,
22,
23,
24,
25,
26,
27,
38,
41,
42,
43,
44,
45,
46,
47,
64,
79,
80,
81
] | 25.609756 | false | 9.459459 | 82 | 2 | 74.390244 | 4 |
def stock_register_cyb() -> pd.DataFrame:
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '894050c76af8597a853f5b408b759f5d',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date
big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date
return big_df
| 18,623 |
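stock_register_kcb and stock_register_cyb are identical except for the TOLIST_MARKET literal in the filter, so both could delegate to one helper; a hedged sketch with a trimmed column list (the name _stock_register_em and the shortened sty value are my own choices, not akshare's):

import pandas as pd
import requests

def _stock_register_em(market: str) -> pd.DataFrame:
    # market is the literal used in the filter, e.g. "科创板" or "创业板"
    url = "https://datacenter.eastmoney.com/securities/api/data/get"
    params = {
        "st": "UPDATE_DATE", "sr": "-1", "ps": "5000", "p": "1",
        "type": "RPT_REGISTERED_INFO",
        "sty": "ISSUER_NAME,CHECK_STATUS,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET",
        "token": "894050c76af8597a853f5b408b759f5d",
        "client": "WEB",
        "filter": f'(TOLIST_MARKET="{market}")',
    }
    headers = {"User-Agent": "Mozilla/5.0"}
    pages = requests.get(url, params=params, headers=headers).json()["result"]["pages"]
    frames = []
    for page in range(1, pages + 1):
        params["p"] = page
        r = requests.get(url, params=params, headers=headers)
        frames.append(pd.DataFrame(r.json()["result"]["data"]))
    # column renaming/selection omitted for brevity
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

def stock_register_kcb() -> pd.DataFrame:
    return _stock_register_em("科创板")

def stock_register_cyb() -> pd.DataFrame:
    return _stock_register_em("创业板")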
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_register.py
|
stock_register_db
|
()
|
return big_df
|
Eastmoney-Data Center-New Share Data-Registration-based Review-Qualified Companies
http://data.eastmoney.com/xg/cyb/
:return: qualified companies
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-New Share Data-Registration-based Review-Qualified Companies
http://data.eastmoney.com/xg/cyb/
:return: qualified companies
:rtype: pandas.DataFrame
| 180 | 281 |
def stock_register_db() -> pd.DataFrame:
"""
    Eastmoney-Data Center-New Share Data-Registration-based Review-Qualified Companies
    http://data.eastmoney.com/xg/cyb/
    :return: qualified companies
:rtype: pandas.DataFrame
"""
# TODO
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '50',
'pageNumber': '1',
'reportName': 'RPT_KCB_IPO',
'columns': 'KCB_LB',
'source': 'WEB',
'client': 'WEB',
'filter': '(ORG_TYPE_CODE="03")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params.update({'pageNumber': page})
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df) + 1)
big_df.columns = [
"序号",
"_",
"_",
"_",
"企业名称",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"经营范围",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"近三年营业收入-2019",
"近三年净利润-2019",
"近三年研发费用-2019",
"近三年营业收入-2018",
"近三年净利润-2018",
"近三年研发费用-2018",
"近三年营业收入-2017",
"近三年净利润-2017",
"近三年研发费用-2017",
"近两年累计净利润",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"序号",
"企业名称",
"经营范围",
"近三年营业收入-2019",
"近三年净利润-2019",
"近三年研发费用-2019",
"近三年营业收入-2018",
"近三年净利润-2018",
"近三年研发费用-2018",
"近三年营业收入-2017",
"近三年净利润-2017",
"近三年研发费用-2017",
"近两年累计净利润",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_register.py#L180-L281
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 7.843137 |
[
8,
9,
20,
23,
24,
25,
26,
27,
28,
29,
32,
33,
34,
35,
36,
37,
38,
83,
101
] | 18.627451 | false | 9.459459 | 102 | 2 | 81.372549 | 4 |
def stock_register_db() -> pd.DataFrame:
# TODO
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '50',
'pageNumber': '1',
'reportName': 'RPT_KCB_IPO',
'columns': 'KCB_LB',
'source': 'WEB',
'client': 'WEB',
'filter': '(ORG_TYPE_CODE="03")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params.update({'pageNumber': page})
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df) + 1)
big_df.columns = [
"序号",
"_",
"_",
"_",
"企业名称",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"经营范围",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"近三年营业收入-2019",
"近三年净利润-2019",
"近三年研发费用-2019",
"近三年营业收入-2018",
"近三年净利润-2018",
"近三年研发费用-2018",
"近三年营业收入-2017",
"近三年净利润-2017",
"近三年研发费用-2017",
"近两年累计净利润",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"序号",
"企业名称",
"经营范围",
"近三年营业收入-2019",
"近三年净利润-2019",
"近三年研发费用-2019",
"近三年营业收入-2018",
"近三年净利润-2018",
"近三年研发费用-2018",
"近三年营业收入-2017",
"近三年净利润-2017",
"近三年研发费用-2017",
"近两年累计净利润",
]
]
return big_df
| 18,624 |
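Responses from this endpoint can come back with an empty result when a page has no rows, in which case indexing data_json['result'] raises a TypeError; a defensive variant of the loop body (the success/result response shape is my reading of the API, treat it as an assumption):

r = requests.get(url, params=params, headers=headers)
data_json = r.json()
result = data_json.get("result") or {}
rows = result.get("data") or []
if rows:
    big_df = pd.concat([big_df, pd.DataFrame(rows)], ignore_index=True)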
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_restricted_em.py
|
stock_restricted_release_summary_em
|
(
symbol: str = "全部股票", start_date: str = "20221101", end_date: str = "20221209"
)
|
return temp_df
|
Eastmoney-Data Center-Featured Data-Restricted Share Release
https://data.eastmoney.com/dxf/marketStatistics.html?type=day&startdate=2022-11-08&enddate=2022-12-19
:param symbol: target market; choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: restricted share release summary
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-Featured Data-Restricted Share Release
https://data.eastmoney.com/dxf/marketStatistics.html?type=day&startdate=2022-11-08&enddate=2022-12-19
:param symbol: target market; choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: restricted share release summary
:rtype: pandas.DataFrame
| 13 | 94 |
def stock_restricted_release_summary_em(
symbol: str = "全部股票", start_date: str = "20221101", end_date: str = "20221209"
) -> pd.DataFrame:
"""
    Eastmoney-Data Center-Featured Data-Restricted Share Release
    https://data.eastmoney.com/dxf/marketStatistics.html?type=day&startdate=2022-11-08&enddate=2022-12-19
    :param symbol: target market; choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: restricted share release summary
:rtype: pandas.DataFrame
"""
symbol_map = {
"全部股票": "000300",
"沪市A股": "000001",
"科创板": "000688",
"深市A股": "399001",
"创业板": "399001",
"京市A股": "999999",
}
start_date_str = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date_str = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE",
"sortTypes": "1",
"pageSize": "500",
"pageNumber": "1",
"columns": "ALL",
"quoteColumns": "f2~03~INDEX_CODE,f3~03~INDEX_CODE,f124~03~INDEX_CODE",
"quoteType": "0",
"source": "WEB",
"client": "WEB",
"filter": f"""(INDEX_CODE="{symbol_map[symbol]}")(FREE_DATE>='{start_date_str}')(FREE_DATE<='{end_date_str}')""",
"reportName": "RPT_LIFTDAY_STA",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"解禁时间",
"当日解禁股票家数",
"实际解禁数量",
"实际解禁市值",
"沪深300指数",
"沪深300指数涨跌幅",
"解禁数量",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"序号",
"解禁时间",
"当日解禁股票家数",
"解禁数量",
"实际解禁数量",
"实际解禁市值",
"沪深300指数",
"沪深300指数涨跌幅",
]
]
temp_df["解禁时间"] = pd.to_datetime(temp_df["解禁时间"]).dt.date
temp_df["当日解禁股票家数"] = pd.to_numeric(temp_df["当日解禁股票家数"], errors="coerce")
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce") * 10000
temp_df["实际解禁市值"] = pd.to_numeric(temp_df["实际解禁市值"], errors="coerce") * 10000
temp_df["沪深300指数"] = pd.to_numeric(temp_df["沪深300指数"], errors="coerce")
temp_df["沪深300指数涨跌幅"] = pd.to_numeric(temp_df["沪深300指数涨跌幅"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_restricted_em.py#L13-L94
| 25 |
[
0
] | 1.219512 |
[
15,
23,
24,
25,
26,
39,
40,
41,
42,
43,
44,
61,
73,
75,
76,
77,
78,
79,
80,
81
] | 24.390244 | false | 8.910891 | 82 | 1 | 75.609756 | 10 |
def stock_restricted_release_summary_em(
symbol: str = "全部股票", start_date: str = "20221101", end_date: str = "20221209"
) -> pd.DataFrame:
symbol_map = {
"全部股票": "000300",
"沪市A股": "000001",
"科创板": "000688",
"深市A股": "399001",
"创业板": "399001",
"京市A股": "999999",
}
start_date_str = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date_str = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE",
"sortTypes": "1",
"pageSize": "500",
"pageNumber": "1",
"columns": "ALL",
"quoteColumns": "f2~03~INDEX_CODE,f3~03~INDEX_CODE,f124~03~INDEX_CODE",
"quoteType": "0",
"source": "WEB",
"client": "WEB",
"filter": f"""(INDEX_CODE="{symbol_map[symbol]}")(FREE_DATE>='{start_date_str}')(FREE_DATE<='{end_date_str}')""",
"reportName": "RPT_LIFTDAY_STA",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"解禁时间",
"当日解禁股票家数",
"实际解禁数量",
"实际解禁市值",
"沪深300指数",
"沪深300指数涨跌幅",
"解禁数量",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"序号",
"解禁时间",
"当日解禁股票家数",
"解禁数量",
"实际解禁数量",
"实际解禁市值",
"沪深300指数",
"沪深300指数涨跌幅",
]
]
temp_df["解禁时间"] = pd.to_datetime(temp_df["解禁时间"]).dt.date
temp_df["当日解禁股票家数"] = pd.to_numeric(temp_df["当日解禁股票家数"], errors="coerce")
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce") * 10000
temp_df["实际解禁市值"] = pd.to_numeric(temp_df["实际解禁市值"], errors="coerce") * 10000
temp_df["沪深300指数"] = pd.to_numeric(temp_df["沪深300指数"], errors="coerce")
temp_df["沪深300指数涨跌幅"] = pd.to_numeric(temp_df["沪深300指数涨跌幅"], errors="coerce")
return temp_df
| 18,625 |
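The two "-".join slices only re-punctuate a compact 20221101 string into 2022-11-01; routing the conversion through datetime additionally validates the input (a sketch, the helper name _to_dashed is hypothetical):

from datetime import datetime

def _to_dashed(date: str) -> str:
    # raises ValueError for anything that is not a real YYYYMMDD date
    return datetime.strptime(date, "%Y%m%d").strftime("%Y-%m-%d")

assert _to_dashed("20221101") == "2022-11-01"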
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_restricted_em.py
|
stock_restricted_release_detail_em
|
(
start_date: str = "20221202", end_date: str = "20241202"
)
|
return big_df
|
Eastmoney-Data Center-Restricted Share Release-Release Details
https://data.eastmoney.com/dxf/detail.html
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: release details
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-Restricted Share Release-Release Details
https://data.eastmoney.com/dxf/detail.html
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: release details
:rtype: pandas.DataFrame
| 97 | 183 |
def stock_restricted_release_detail_em(
start_date: str = "20221202", end_date: str = "20241202"
) -> pd.DataFrame:
"""
    Eastmoney-Data Center-Restricted Share Release-Release Details
    https://data.eastmoney.com/dxf/detail.html
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: release details
:rtype: pandas.DataFrame
"""
start_date_str = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date_str = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE,CURRENT_FREE_SHARES",
"sortTypes": "1,1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_STAGE",
"columns": "SECURITY_CODE,SECURITY_NAME_ABBR,FREE_DATE,CURRENT_FREE_SHARES,ABLE_FREE_SHARES,LIFT_MARKET_CAP,FREE_RATIO,NEW,B20_ADJCHRATE,A20_ADJCHRATE,FREE_SHARES_TYPE,TOTAL_RATIO,NON_FREE_SHARES,BATCH_HOLDER_NUM",
"source": "WEB",
"client": "WEB",
"filter": f"""(FREE_DATE>='{start_date_str}')(FREE_DATE<='{end_date_str}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update(
{
"pageNumber": page,
}
)
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df["index"] + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"解禁时间",
"实际解禁数量",
"解禁数量",
"实际解禁市值",
"占解禁前流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
"限售股类型",
"-",
"-",
"-",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"解禁时间",
"限售股类型",
"解禁数量",
"实际解禁数量",
"实际解禁市值",
"占解禁前流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
]
]
big_df["解禁时间"] = pd.to_datetime(big_df["解禁时间"]).dt.date
big_df["解禁数量"] = pd.to_numeric(big_df["解禁数量"], errors="coerce") * 10000
big_df["实际解禁数量"] = pd.to_numeric(big_df["实际解禁数量"], errors="coerce") * 10000
big_df["实际解禁市值"] = pd.to_numeric(big_df["实际解禁市值"], errors="coerce") * 10000
big_df["占解禁前流通市值比例"] = pd.to_numeric(big_df["占解禁前流通市值比例"], errors="coerce")
big_df["解禁前一交易日收盘价"] = pd.to_numeric(big_df["解禁前一交易日收盘价"], errors="coerce")
big_df["解禁前20日涨跌幅"] = pd.to_numeric(big_df["解禁前20日涨跌幅"], errors="coerce")
big_df["解禁后20日涨跌幅"] = pd.to_numeric(big_df["解禁后20日涨跌幅"], errors="coerce")
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_restricted_em.py#L97-L183
| 25 |
[
0
] | 1.149425 |
[
13,
14,
15,
16,
27,
28,
29,
30,
31,
32,
37,
38,
39,
40,
42,
43,
44,
61,
77,
79,
80,
81,
82,
83,
84,
85,
86
] | 31.034483 | false | 8.910891 | 87 | 2 | 68.965517 | 8 |
def stock_restricted_release_detail_em(
start_date: str = "20221202", end_date: str = "20241202"
) -> pd.DataFrame:
start_date_str = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date_str = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE,CURRENT_FREE_SHARES",
"sortTypes": "1,1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_STAGE",
"columns": "SECURITY_CODE,SECURITY_NAME_ABBR,FREE_DATE,CURRENT_FREE_SHARES,ABLE_FREE_SHARES,LIFT_MARKET_CAP,FREE_RATIO,NEW,B20_ADJCHRATE,A20_ADJCHRATE,FREE_SHARES_TYPE,TOTAL_RATIO,NON_FREE_SHARES,BATCH_HOLDER_NUM",
"source": "WEB",
"client": "WEB",
"filter": f"""(FREE_DATE>='{start_date_str}')(FREE_DATE<='{end_date_str}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update(
{
"pageNumber": page,
}
)
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df["index"] + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"解禁时间",
"实际解禁数量",
"解禁数量",
"实际解禁市值",
"占解禁前流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
"限售股类型",
"-",
"-",
"-",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"解禁时间",
"限售股类型",
"解禁数量",
"实际解禁数量",
"实际解禁市值",
"占解禁前流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
]
]
big_df["解禁时间"] = pd.to_datetime(big_df["解禁时间"]).dt.date
big_df["解禁数量"] = pd.to_numeric(big_df["解禁数量"], errors="coerce") * 10000
big_df["实际解禁数量"] = pd.to_numeric(big_df["实际解禁数量"], errors="coerce") * 10000
big_df["实际解禁市值"] = pd.to_numeric(big_df["实际解禁市值"], errors="coerce") * 10000
big_df["占解禁前流通市值比例"] = pd.to_numeric(big_df["占解禁前流通市值比例"], errors="coerce")
big_df["解禁前一交易日收盘价"] = pd.to_numeric(big_df["解禁前一交易日收盘价"], errors="coerce")
big_df["解禁前20日涨跌幅"] = pd.to_numeric(big_df["解禁前20日涨跌幅"], errors="coerce")
big_df["解禁后20日涨跌幅"] = pd.to_numeric(big_df["解禁后20日涨跌幅"], errors="coerce")
return big_df
| 18,626 |
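A quick usage sketch that pulls a window of release details and re-aggregates market value per release date, which should line up roughly with the per-day summary function above (assumes the top-level export ak.stock_restricted_release_detail_em):

import akshare as ak

detail = ak.stock_restricted_release_detail_em(start_date="20221202", end_date="20230302")
per_day = detail.groupby("解禁时间")["实际解禁市值"].sum()
print(per_day.head())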
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_restricted_em.py
|
stock_restricted_release_queue_em
|
(symbol: str = "600000")
|
return temp_df
|
Eastmoney-Data Center-Individual Stock Restricted Release-Release Batches
https://data.eastmoney.com/dxf/q/600000.html
:param symbol: stock code
:type symbol: str
:return: restricted release batches for the stock
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-Individual Stock Restricted Release-Release Batches
https://data.eastmoney.com/dxf/q/600000.html
:param symbol: stock code
:type symbol: str
:return: restricted release batches for the stock
:rtype: pandas.DataFrame
| 186 | 258 |
def stock_restricted_release_queue_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney-Data Center-Individual Stock Restricted Release-Release Batches
    https://data.eastmoney.com/dxf/q/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: restricted release batches for the stock
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_STAGE",
"filter": f'(SECURITY_CODE="{symbol}")',
"columns": "SECURITY_CODE,SECURITY_NAME_ABBR,FREE_DATE,CURRENT_FREE_SHARES,ABLE_FREE_SHARES,LIFT_MARKET_CAP,FREE_RATIO,NEW,B20_ADJCHRATE,A20_ADJCHRATE,FREE_SHARES_TYPE,TOTAL_RATIO,NON_FREE_SHARES,BATCH_HOLDER_NUM",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"-",
"-",
"解禁时间",
"实际解禁数量",
"解禁数量",
"实际解禁数量市值",
"占流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
"限售股类型",
"占总市值比例",
"未解禁数量",
"解禁股东数",
]
temp_df = temp_df[
[
"序号",
"解禁时间",
"解禁股东数",
"解禁数量",
"实际解禁数量",
"未解禁数量",
"实际解禁数量市值",
"占总市值比例",
"占流通市值比例",
"解禁前一交易日收盘价",
"限售股类型",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
]
]
temp_df["解禁时间"] = pd.to_datetime(temp_df["解禁时间"]).dt.date
temp_df["解禁股东数"] = pd.to_numeric(temp_df["解禁股东数"], errors="coerce")
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce") * 10000
temp_df["未解禁数量"] = pd.to_numeric(temp_df["未解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量市值"] = pd.to_numeric(temp_df["实际解禁数量市值"], errors="coerce") * 10000
temp_df["占总市值比例"] = pd.to_numeric(temp_df["占总市值比例"], errors="coerce")
temp_df["占流通市值比例"] = pd.to_numeric(temp_df["占流通市值比例"], errors="coerce")
temp_df["解禁前一交易日收盘价"] = pd.to_numeric(temp_df["解禁前一交易日收盘价"], errors="coerce")
temp_df["解禁前20日涨跌幅"] = pd.to_numeric(temp_df["解禁前20日涨跌幅"], errors="coerce")
temp_df["解禁后20日涨跌幅"] = pd.to_numeric(temp_df["解禁后20日涨跌幅"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_restricted_em.py#L186-L258
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 12.328767 |
[
9,
10,
21,
22,
23,
24,
25,
26,
43,
60,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72
] | 28.767123 | false | 8.910891 | 73 | 1 | 71.232877 | 6 |
def stock_restricted_release_queue_em(symbol: str = "600000") -> pd.DataFrame:
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "FREE_DATE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_STAGE",
"filter": f'(SECURITY_CODE="{symbol}")',
"columns": "SECURITY_CODE,SECURITY_NAME_ABBR,FREE_DATE,CURRENT_FREE_SHARES,ABLE_FREE_SHARES,LIFT_MARKET_CAP,FREE_RATIO,NEW,B20_ADJCHRATE,A20_ADJCHRATE,FREE_SHARES_TYPE,TOTAL_RATIO,NON_FREE_SHARES,BATCH_HOLDER_NUM",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"-",
"-",
"解禁时间",
"实际解禁数量",
"解禁数量",
"实际解禁数量市值",
"占流通市值比例",
"解禁前一交易日收盘价",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
"限售股类型",
"占总市值比例",
"未解禁数量",
"解禁股东数",
]
temp_df = temp_df[
[
"序号",
"解禁时间",
"解禁股东数",
"解禁数量",
"实际解禁数量",
"未解禁数量",
"实际解禁数量市值",
"占总市值比例",
"占流通市值比例",
"解禁前一交易日收盘价",
"限售股类型",
"解禁前20日涨跌幅",
"解禁后20日涨跌幅",
]
]
temp_df["解禁时间"] = pd.to_datetime(temp_df["解禁时间"]).dt.date
temp_df["解禁股东数"] = pd.to_numeric(temp_df["解禁股东数"], errors="coerce")
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce") * 10000
temp_df["未解禁数量"] = pd.to_numeric(temp_df["未解禁数量"], errors="coerce") * 10000
temp_df["实际解禁数量市值"] = pd.to_numeric(temp_df["实际解禁数量市值"], errors="coerce") * 10000
temp_df["占总市值比例"] = pd.to_numeric(temp_df["占总市值比例"], errors="coerce")
temp_df["占流通市值比例"] = pd.to_numeric(temp_df["占流通市值比例"], errors="coerce")
temp_df["解禁前一交易日收盘价"] = pd.to_numeric(temp_df["解禁前一交易日收盘价"], errors="coerce")
temp_df["解禁前20日涨跌幅"] = pd.to_numeric(temp_df["解禁前20日涨跌幅"], errors="coerce")
temp_df["解禁后20日涨跌幅"] = pd.to_numeric(temp_df["解禁后20日涨跌幅"], errors="coerce")
return temp_df
| 18,627 |
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_restricted_em.py
|
stock_restricted_release_stockholder_em
|
(
symbol: str = "600000", date: str = "20200904"
)
|
return temp_df
|
Eastmoney-Data Center-Individual Stock Restricted Release-Releasing Shareholders
https://data.eastmoney.com/dxf/q/600000.html
:param symbol: stock code
:type symbol: str
:param date: date; obtained via ak.stock_restricted_release_queue_em(symbol="600000")
:type date: str
:return: releasing shareholders for the stock
:rtype: pandas.DataFrame
|
Eastmoney-Data Center-Individual Stock Restricted Release-Releasing Shareholders
https://data.eastmoney.com/dxf/q/600000.html
:param symbol: stock code
:type symbol: str
:param date: date; obtained via ak.stock_restricted_release_queue_em(symbol="600000")
:type date: str
:return: releasing shareholders for the stock
:rtype: pandas.DataFrame
| 261 | 309 |
def stock_restricted_release_stockholder_em(
symbol: str = "600000", date: str = "20200904"
) -> pd.DataFrame:
"""
    Eastmoney-Data Center-Individual Stock Restricted Release-Releasing Shareholders
    https://data.eastmoney.com/dxf/q/600000.html
    :param symbol: stock code
    :type symbol: str
    :param date: date; obtained via ak.stock_restricted_release_queue_em(symbol="600000")
    :type date: str
    :return: releasing shareholders for the stock
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
date_str = "-".join([date[:4], date[4:6], date[6:]])
params = {
"sortColumns": "ADD_LISTING_SHARES",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_GD",
"filter": f"""(SECURITY_CODE="{symbol}")(FREE_DATE='{date_str}')""",
"columns": "LIMITED_HOLDER_NAME,ADD_LISTING_SHARES,ACTUAL_LISTED_SHARES,ADD_LISTING_CAP,LOCK_MONTH,RESIDUAL_LIMITED_SHARES,FREE_SHARES_TYPE,PLAN_FEATURE",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"股东名称",
"解禁数量",
"实际解禁数量",
"解禁市值",
"锁定期",
"剩余未解禁数量",
"限售股类型",
"进度",
]
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce")
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce")
temp_df["解禁市值"] = pd.to_numeric(temp_df["解禁市值"], errors="coerce")
temp_df["锁定期"] = pd.to_numeric(temp_df["锁定期"], errors="coerce")
temp_df["剩余未解禁数量"] = pd.to_numeric(temp_df["剩余未解禁数量"], errors="coerce")
temp_df["剩余未解禁数量"] = pd.to_numeric(temp_df["剩余未解禁数量"], errors="coerce")
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_restricted_em.py#L261-L309
| 25 |
[
0
] | 2.040816 |
[
13,
14,
15,
26,
27,
28,
29,
30,
31,
42,
43,
44,
45,
46,
47,
48
] | 32.653061 | false | 8.910891 | 49 | 1 | 67.346939 | 8 |
def stock_restricted_release_stockholder_em(
symbol: str = "600000", date: str = "20200904"
) -> pd.DataFrame:
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
date_str = "-".join([date[:4], date[4:6], date[6:]])
params = {
"sortColumns": "ADD_LISTING_SHARES",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_LIFT_GD",
"filter": f"""(SECURITY_CODE="{symbol}")(FREE_DATE='{date_str}')""",
"columns": "LIMITED_HOLDER_NAME,ADD_LISTING_SHARES,ACTUAL_LISTED_SHARES,ADD_LISTING_CAP,LOCK_MONTH,RESIDUAL_LIMITED_SHARES,FREE_SHARES_TYPE,PLAN_FEATURE",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df["index"] + 1
temp_df.columns = [
"序号",
"股东名称",
"解禁数量",
"实际解禁数量",
"解禁市值",
"锁定期",
"剩余未解禁数量",
"限售股类型",
"进度",
]
temp_df["解禁数量"] = pd.to_numeric(temp_df["解禁数量"], errors="coerce")
temp_df["实际解禁数量"] = pd.to_numeric(temp_df["实际解禁数量"], errors="coerce")
temp_df["解禁市值"] = pd.to_numeric(temp_df["解禁市值"], errors="coerce")
temp_df["锁定期"] = pd.to_numeric(temp_df["锁定期"], errors="coerce")
temp_df["剩余未解禁数量"] = pd.to_numeric(temp_df["剩余未解禁数量"], errors="coerce")
temp_df["剩余未解禁数量"] = pd.to_numeric(temp_df["剩余未解禁数量"], errors="coerce")
return temp_df
| 18,628 |
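As the docstring says, the date argument is taken from the batch table returned by the previous function; a sketch chaining the two for one stock (assumes both are exported at akshare's top level, and that 解禁时间 holds datetime.date values as the .dt.date conversion above implies):

import akshare as ak

batches = ak.stock_restricted_release_queue_em(symbol="600000")
latest = batches["解禁时间"].iloc[0].strftime("%Y%m%d")   # queue is sorted by FREE_DATE desc
holders = ak.stock_restricted_release_stockholder_em(symbol="600000", date=latest)
print(holders[["股东名称", "解禁数量"]])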
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_recommend.py
|
stock_institute_recommend
|
(symbol: str = "投资评级选股") -> pd.DataF
|
return temp_df
|
Sina Finance-Institutional Recommendation Pool-Latest Investment Ratings
http://stock.finance.sina.com.cn/stock/go.php/vIR_RatingNewest/index.phtml
:param symbol: choice of {'最新投资评级', '上调评级股票', '下调评级股票', '股票综合评级', '首次评级股票', '目标涨幅排名', '机构关注度', '行业关注度', '投资评级选股'}
:type symbol: str
:return: latest investment rating data
:rtype: pandas.DataFrame
|
Sina Finance-Institutional Recommendation Pool-Latest Investment Ratings
http://stock.finance.sina.com.cn/stock/go.php/vIR_RatingNewest/index.phtml
:param symbol: choice of {'最新投资评级', '上调评级股票', '下调评级股票', '股票综合评级', '首次评级股票', '目标涨幅排名', '机构关注度', '行业关注度', '投资评级选股'}
:type symbol: str
:return: latest investment rating data
:rtype: pandas.DataFrame
| 13 | 69 |
def stock_institute_recommend(symbol: str = "投资评级选股") -> pd.DataFrame:
"""
    Sina Finance-Institutional Recommendation Pool-Latest Investment Ratings
    http://stock.finance.sina.com.cn/stock/go.php/vIR_RatingNewest/index.phtml
    :param symbol: choice of {'最新投资评级', '上调评级股票', '下调评级股票', '股票综合评级', '首次评级股票', '目标涨幅排名', '机构关注度', '行业关注度', '投资评级选股'}
    :type symbol: str
    :return: latest investment rating data
:rtype: pandas.DataFrame
"""
url = "http://stock.finance.sina.com.cn/stock/go.php/vIR_RatingNewest/index.phtml"
params = {
"num": "40",
"p": "1",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
indicator_map = {item.find("a").text: item.find("a")["href"] for item in soup.find(attrs={"id": "leftMenu"}).find_all("dd")[1].find_all("li")}
url = indicator_map[symbol]
params = {
"num": "10000",
"p": "1",
}
r = requests.get(url, params=params)
if symbol == "股票综合评级":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :9]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"综合评级↓": "综合评级"})
return temp_df
if symbol == "首次评级股票":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
if symbol == "目标涨幅排名":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :7]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"平均目标涨幅↓": "平均目标涨幅"})
return temp_df
if symbol == "机构关注度":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :11]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"关注度↓": "关注度"})
return temp_df
if symbol == "行业关注度":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :11]
temp_df = temp_df.rename(columns={"关注度↓": "关注度"})
return temp_df
if symbol == "投资评级选股":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :9]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["评级明细"]
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_recommend.py#L13-L69
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 15.789474 |
[
9,
10,
14,
15,
16,
17,
18,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56
] | 73.684211 | false | 11.47541 | 57 | 7 | 26.315789 | 6 |
def stock_institute_recommend(symbol: str = "投资评级选股") -> pd.DataFrame:
url = "http://stock.finance.sina.com.cn/stock/go.php/vIR_RatingNewest/index.phtml"
params = {
"num": "40",
"p": "1",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
indicator_map = {item.find("a").text: item.find("a")["href"] for item in soup.find(attrs={"id": "leftMenu"}).find_all("dd")[1].find_all("li")}
url = indicator_map[symbol]
params = {
"num": "10000",
"p": "1",
}
r = requests.get(url, params=params)
if symbol == "股票综合评级":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :9]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"综合评级↓": "综合评级"})
return temp_df
if symbol == "首次评级股票":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
if symbol == "目标涨幅排名":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :7]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"平均目标涨幅↓": "平均目标涨幅"})
return temp_df
if symbol == "机构关注度":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :11]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"关注度↓": "关注度"})
return temp_df
if symbol == "行业关注度":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :11]
temp_df = temp_df.rename(columns={"关注度↓": "关注度"})
return temp_df
if symbol == "投资评级选股":
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :9]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
del temp_df["评级明细"]
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
| 18,629 |
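A minimal usage sketch for the stock_institute_recommend record above, importing it via the module path shown in this record; it assumes network access to the Sina endpoint is available and uses symbol values listed in the docstring.

from akshare.stock_fundamental.stock_recommend import stock_institute_recommend

# Default left-menu entry "投资评级选股" (rating-based stock screen).
df_screen = stock_institute_recommend()
# A different menu entry, e.g. "机构关注度" (institutional attention).
df_attention = stock_institute_recommend(symbol="机构关注度")
print(df_screen.head())
print(df_attention.shape)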
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_recommend.py
|
stock_institute_recommend_detail
|
(symbol: str = "000001")
|
return temp_df
|
新浪财经-机构推荐池-股票评级记录
http://stock.finance.sina.com.cn/stock/go.php/vIR_StockSearch/key/sz000001.phtml
:param symbol: 股票代码
:type symbol: str
:return: 具体股票的股票评级记录
:rtype: pandas.DataFrame
|
新浪财经-机构推荐池-股票评级记录
http://stock.finance.sina.com.cn/stock/go.php/vIR_StockSearch/key/sz000001.phtml
:param symbol: 股票代码
:type symbol: str
:return: 具体股票的股票评级记录
:rtype: pandas.DataFrame
| 72 | 90 |
def stock_institute_recommend_detail(symbol: str = "000001") -> pd.DataFrame:
"""
新浪财经-机构推荐池-股票评级记录
http://stock.finance.sina.com.cn/stock/go.php/vIR_StockSearch/key/sz000001.phtml
:param symbol: 股票代码
:type symbol: str
:return: 具体股票的股票评级记录
:rtype: pandas.DataFrame
"""
url = f"http://stock.finance.sina.com.cn/stock/go.php/vIR_StockSearch/key/{symbol}.phtml"
params = {
"num": "5000",
"p": "1",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_recommend.py#L72-L90
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 47.368421 |
[
9,
10,
14,
15,
16,
17,
18
] | 36.842105 | false | 11.47541 | 19 | 1 | 63.157895 | 6 |
def stock_institute_recommend_detail(symbol: str = "000001") -> pd.DataFrame:
url = f"http://stock.finance.sina.com.cn/stock/go.php/vIR_StockSearch/key/{symbol}.phtml"
params = {
"num": "5000",
"p": "1",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text, header=0)[0].iloc[:, :8]
temp_df["股票代码"] = temp_df["股票代码"].astype(str).str.zfill(6)
temp_df = temp_df.rename(columns={"评级日期↓": "评级日期"})
return temp_df
| 18,630 |
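A short sketch for stock_institute_recommend_detail above, under the same assumptions (live Sina endpoint, importable module path); the default symbol is a 6-digit A-share code.

from akshare.stock_fundamental.stock_recommend import stock_institute_recommend_detail

# Rating history for one stock; codes are zero-padded to 6 digits by the function.
df = stock_institute_recommend_detail(symbol="000001")
# "评级日期" is the renamed sort column from the source table.
print(df[["股票代码", "评级日期"]].head())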
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_profit_forecast.py
|
stock_profit_forecast
|
()
|
return big_df
|
东方财富网-数据中心-研究报告-盈利预测
http://data.eastmoney.com/report/profitforecast.jshtml
:return: 盈利预测
:rtype: pandas.DataFrame
|
东方财富网-数据中心-研究报告-盈利预测
http://data.eastmoney.com/report/profitforecast.jshtml
:return: 盈利预测
:rtype: pandas.DataFrame
| 13 | 118 |
def stock_profit_forecast():
"""
东方财富网-数据中心-研究报告-盈利预测
http://data.eastmoney.com/report/profitforecast.jshtml
:return: 盈利预测
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'reportName': 'RPT_WEB_RESPREDICT',
'columns': 'WEB_RESPREDICT',
'pageNumber': '1',
'pageSize': '500',
'sortTypes': '-1',
'sortColumns': 'RATING_ORG_NUM',
'p': '1',
'pageNo': '1',
'pageNum': '1',
'filter': '',
'_': '1640241417037',
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = int(data_json['result']['pages'])
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params.update({
'pageNumber': page,
'p': page,
'pageNo': page,
'pageNum': page,
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
year1 = list(set(big_df['YEAR1']))[-1]
year2 = list(set(big_df['YEAR2']))[-1]
year3 = list(set(big_df['YEAR3']))[-1]
year4 = list(set(big_df['YEAR4']))[0]
big_df.columns = [
"序号",
"-",
"代码",
"名称",
"研报数",
"机构投资评级(近六个月)-买入",
"机构投资评级(近六个月)-增持",
"机构投资评级(近六个月)-中性",
"机构投资评级(近六个月)-减持",
"机构投资评级(近六个月)-卖出",
"-",
"_",
f"{year1}预测每股收益",
"-",
"_",
f"{year2}预测每股收益",
"-",
"_",
f"{year3}预测每股收益",
"-",
"_",
f"{year4}预测每股收益",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"研报数",
"机构投资评级(近六个月)-买入",
"机构投资评级(近六个月)-增持",
"机构投资评级(近六个月)-中性",
"机构投资评级(近六个月)-减持",
"机构投资评级(近六个月)-卖出",
f"{year1}预测每股收益",
f"{year2}预测每股收益",
f"{year3}预测每股收益",
f"{year4}预测每股收益",
]
]
big_df['研报数'] = pd.to_numeric(big_df['研报数'])
big_df['机构投资评级(近六个月)-买入'] = pd.to_numeric(big_df['机构投资评级(近六个月)-买入'])
big_df['机构投资评级(近六个月)-增持'] = pd.to_numeric(big_df['机构投资评级(近六个月)-增持'])
big_df['机构投资评级(近六个月)-中性'] = pd.to_numeric(big_df['机构投资评级(近六个月)-中性'])
big_df['机构投资评级(近六个月)-减持'] = pd.to_numeric(big_df['机构投资评级(近六个月)-减持'])
big_df['机构投资评级(近六个月)-卖出'] = pd.to_numeric(big_df['机构投资评级(近六个月)-卖出'])
big_df[f"{year1}预测每股收益"] = pd.to_numeric(big_df[f"{year1}预测每股收益"])
big_df[f"{year2}预测每股收益"] = pd.to_numeric(big_df[f"{year2}预测每股收益"])
big_df[f"{year3}预测每股收益"] = pd.to_numeric(big_df[f"{year3}预测每股收益"])
big_df[f"{year4}预测每股收益"] = pd.to_numeric(big_df[f"{year4}预测每股收益"])
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_profit_forecast.py#L13-L118
| 25 |
[
0,
1,
2,
3,
4,
5,
6
] | 6.603774 |
[
7,
8,
21,
22,
23,
24,
25,
26,
32,
33,
34,
35,
37,
38,
39,
40,
41,
42,
43,
78,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105
] | 29.245283 | false | 15.384615 | 106 | 2 | 70.754717 | 4 |
def stock_profit_forecast():
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'reportName': 'RPT_WEB_RESPREDICT',
'columns': 'WEB_RESPREDICT',
'pageNumber': '1',
'pageSize': '500',
'sortTypes': '-1',
'sortColumns': 'RATING_ORG_NUM',
'p': '1',
'pageNo': '1',
'pageNum': '1',
'filter': '',
'_': '1640241417037',
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = int(data_json['result']['pages'])
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params.update({
'pageNumber': page,
'p': page,
'pageNo': page,
'pageNum': page,
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
year1 = list(set(big_df['YEAR1']))[-1]
year2 = list(set(big_df['YEAR2']))[-1]
year3 = list(set(big_df['YEAR3']))[-1]
year4 = list(set(big_df['YEAR4']))[0]
big_df.columns = [
"序号",
"-",
"代码",
"名称",
"研报数",
"机构投资评级(近六个月)-买入",
"机构投资评级(近六个月)-增持",
"机构投资评级(近六个月)-中性",
"机构投资评级(近六个月)-减持",
"机构投资评级(近六个月)-卖出",
"-",
"_",
f"{year1}预测每股收益",
"-",
"_",
f"{year2}预测每股收益",
"-",
"_",
f"{year3}预测每股收益",
"-",
"_",
f"{year4}预测每股收益",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"研报数",
"机构投资评级(近六个月)-买入",
"机构投资评级(近六个月)-增持",
"机构投资评级(近六个月)-中性",
"机构投资评级(近六个月)-减持",
"机构投资评级(近六个月)-卖出",
f"{year1}预测每股收益",
f"{year2}预测每股收益",
f"{year3}预测每股收益",
f"{year4}预测每股收益",
]
]
big_df['研报数'] = pd.to_numeric(big_df['研报数'])
big_df['机构投资评级(近六个月)-买入'] = pd.to_numeric(big_df['机构投资评级(近六个月)-买入'])
big_df['机构投资评级(近六个月)-增持'] = pd.to_numeric(big_df['机构投资评级(近六个月)-增持'])
big_df['机构投资评级(近六个月)-中性'] = pd.to_numeric(big_df['机构投资评级(近六个月)-中性'])
big_df['机构投资评级(近六个月)-减持'] = pd.to_numeric(big_df['机构投资评级(近六个月)-减持'])
big_df['机构投资评级(近六个月)-卖出'] = pd.to_numeric(big_df['机构投资评级(近六个月)-卖出'])
big_df[f"{year1}预测每股收益"] = pd.to_numeric(big_df[f"{year1}预测每股收益"])
big_df[f"{year2}预测每股收益"] = pd.to_numeric(big_df[f"{year2}预测每股收益"])
big_df[f"{year3}预测每股收益"] = pd.to_numeric(big_df[f"{year3}预测每股收益"])
big_df[f"{year4}预测每股收益"] = pd.to_numeric(big_df[f"{year4}预测每股收益"])
return big_df
| 18,632 |
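A usage sketch for stock_profit_forecast above, assuming the Eastmoney endpoint is still live. Because the forecast-year column names are built dynamically from the payload, a caller should discover them from the returned frame rather than hard-coding years.

from akshare.stock_fundamental.stock_profit_forecast import stock_profit_forecast

# Paginated fetch with a tqdm progress bar; may take a while.
df = stock_profit_forecast()
eps_cols = [c for c in df.columns if c.endswith("预测每股收益")]
print(df[["代码", "名称", "研报数"] + eps_cols].head())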
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_mda_ym.py
|
stock_mda_ym
|
(symbol: str = "000001")
|
return big_df
|
益盟-F10-管理层讨论与分析
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 管理层讨论与分析
:rtype: pandas.DataFrame
|
益盟-F10-管理层讨论与分析
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 管理层讨论与分析
:rtype: pandas.DataFrame
| 13 | 35 |
def stock_mda_ym(symbol: str = "000001") -> pd.DataFrame:
"""
益盟-F10-管理层讨论与分析
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 管理层讨论与分析
:rtype: pandas.DataFrame
"""
url = f"http://f10.emoney.cn/f10/zygc/{symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_list = [
item.text.strip()
for item in soup.find(attrs={"class": "swlab_t"}).find_all("li")
]
talk_list = [
item.text.strip().replace("\xa0", " ")
for item in soup.find_all(attrs={"class": "cnt"})
]
big_df = pd.DataFrame([year_list, talk_list]).T
big_df.columns = ["报告期", "内容"]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_mda_ym.py#L13-L35
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 39.130435 |
[
9,
10,
11,
12,
16,
20,
21,
22
] | 34.782609 | false | 37.5 | 23 | 3 | 65.217391 | 6 |
def stock_mda_ym(symbol: str = "000001") -> pd.DataFrame:
url = f"http://f10.emoney.cn/f10/zygc/{symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_list = [
item.text.strip()
for item in soup.find(attrs={"class": "swlab_t"}).find_all("li")
]
talk_list = [
item.text.strip().replace("\xa0", " ")
for item in soup.find_all(attrs={"class": "cnt"})
]
big_df = pd.DataFrame([year_list, talk_list]).T
big_df.columns = ["报告期", "内容"]
return big_df
| 18,633 |
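A minimal sketch for stock_mda_ym above, assuming the emoney.cn page is reachable; it returns one row per reporting period with the free-text discussion in "内容".

from akshare.stock_fundamental.stock_mda_ym import stock_mda_ym

# Management discussion & analysis for one stock.
df = stock_mda_ym(symbol="000001")
for _, row in df.head(2).iterrows():
    # Print the period and the first 80 characters of the discussion text.
    print(row["报告期"], row["内容"][:80], "...")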
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_finance_hk.py
|
stock_financial_hk_report_em
|
(
stock: str = "00700", symbol: str = "现金流量表", indicator: str = "年度"
)
|
return temp_df
|
东方财富-港股-财务报表-三大报表
https://emweb.securities.eastmoney.com/PC_HKF10/FinancialAnalysis/index?type=web&code=00700
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 东方财富-港股-财务报表-三大报表
:rtype: pandas.DataFrame
|
东方财富-港股-财务报表-三大报表
https://emweb.securities.eastmoney.com/PC_HKF10/FinancialAnalysis/index?type=web&code=00700
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 东方财富-港股-财务报表-三大报表
:rtype: pandas.DataFrame
| 12 | 46 |
def stock_financial_hk_report_em(
stock: str = "00700", symbol: str = "现金流量表", indicator: str = "年度"
) -> pd.DataFrame:
"""
东方财富-港股-财务报表-三大报表
https://emweb.securities.eastmoney.com/PC_HKF10/FinancialAnalysis/index?type=web&code=00700
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 东方财富-港股-财务报表-三大报表
:rtype: pandas.DataFrame
"""
if indicator == "年度":
rtype = 6
elif indicator == "报告期":
rtype = 0
else:
raise Exception("请输入正确的 indicator !", indicator)
if symbol == "资产负债表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZCFZB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 资产负债表
elif symbol == "利润表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetLRB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 利润表
elif symbol == "现金流量表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetXJLLB?code={stock}&startdate=&rtype={rtype}" # 现金流量表
r = requests.get(url)
temp_df = pd.DataFrame(eval(r.text)["data"])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df["截止日期"] = pd.to_datetime(temp_df["截止日期"], format="%y-%m-%d").dt.date
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_finance_hk.py#L12-L46
| 25 |
[
0
] | 2.857143 |
[
15,
16,
17,
18,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34
] | 54.285714 | false | 10.344828 | 35 | 6 | 45.714286 | 10 |
def stock_financial_hk_report_em(
stock: str = "00700", symbol: str = "现金流量表", indicator: str = "年度"
) -> pd.DataFrame:
if indicator == "年度":
rtype = 6
elif indicator == "报告期":
rtype = 0
else:
raise Exception("请输入正确的 indicator !", indicator)
if symbol == "资产负债表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZCFZB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 资产负债表
elif symbol == "利润表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetLRB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 利润表
elif symbol == "现金流量表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetXJLLB?code={stock}&startdate=&rtype={rtype}" # 现金流量表
r = requests.get(url)
temp_df = pd.DataFrame(eval(r.text)["data"])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df["截止日期"] = pd.to_datetime(temp_df["截止日期"], format="%y-%m-%d").dt.date
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
return temp_df
| 18,634 |
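A usage sketch for stock_financial_hk_report_em above, assuming network access to Eastmoney; symbol selects one of the three statements and indicator switches between annual and per-report-period data, as the docstring states.

from akshare.stock_fundamental.stock_finance_hk import stock_financial_hk_report_em

# Annual cash-flow statement for 00700.
df_cash = stock_financial_hk_report_em(stock="00700", symbol="现金流量表", indicator="年度")
# Per-report-period balance sheet for the same stock.
df_bs = stock_financial_hk_report_em(stock="00700", symbol="资产负债表", indicator="报告期")
print(df_cash["截止日期"].head())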
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_finance_hk.py
|
stock_financial_hk_analysis_indicator_em
|
(
symbol: str = "00700", indicator: str = "年度"
)
|
return temp_df
|
东方财富-港股-财务分析-主要指标
https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/index?type=web&code=00700
:param symbol: 股票代码
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 东方财富-港股-财务分析-主要指标
:rtype: pandas.DataFrame
|
东方财富-港股-财务分析-主要指标
https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/index?type=web&code=00700
:param symbol: 股票代码
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 新浪财经-港股-财务分析-主要指标
:rtype: pandas.DataFrame
| 49 | 113 |
def stock_financial_hk_analysis_indicator_em(
symbol: str = "00700", indicator: str = "年度"
) -> pd.DataFrame:
"""
东方财富-港股-财务分析-主要指标
https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/index?type=web&code=00700
:param symbol: 股票代码
:type symbol: str
:param indicator: choice of {"年度", "报告期"}
:type indicator: str
:return: 东方财富-港股-财务分析-主要指标
:rtype: pandas.DataFrame
"""
if indicator == "年度":
key = "zyzb_an"
elif indicator == "报告期":
key = "zyzb_abgq"
else:
raise Exception("非法的关键字!", indicator)
url = f"http://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZYZB?code={symbol}"
r = requests.get(url)
temp_df = pd.DataFrame.from_records(eval(r.text)["data"][key])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df["周期"] = pd.to_datetime(temp_df["每股指标"], format="%y-%m-%d").dt.date
temp_df = temp_df.drop("每股指标", axis=1)
temp_df = temp_df[
[
"周期",
"基本每股收益(元)",
"稀释每股收益(元)",
"TTM每股收益(元)",
"每股净资产(元)",
"每股经营现金流(元)",
"每股营业收入(元)",
"成长能力指标",
"营业总收入(元)",
"毛利润",
"归母净利润",
"营业总收入同比增长(%)",
"毛利润同比增长(%)",
"归母净利润同比增长(%)",
"营业总收入滚动环比增长(%)",
"毛利润滚动环比增长(%)",
"归母净利润滚动环比增长(%)",
"盈利能力指标",
"平均净资产收益率(%)",
"年化净资产收益率(%)",
"总资产净利率(%)",
"毛利率(%)",
"净利率(%)",
"年化投资回报率(%)",
"盈利质量指标",
"所得税/利润总额(%)",
"经营现金流/营业收入(%)",
"财务风险指标",
"资产负债率(%)",
"流动负债/总负债(%)",
"流动比率",
]
]
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
temp_df["周期"] = pd.to_datetime(temp_df["周期"]).dt.date
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_finance_hk.py#L49-L113
| 25 |
[
0
] | 1.538462 |
[
13,
14,
15,
16,
18,
19,
20,
21,
22,
23,
24,
25,
26,
61,
62,
63,
64
] | 26.153846 | false | 10.344828 | 65 | 3 | 73.846154 | 8 |
def stock_financial_hk_analysis_indicator_em(
symbol: str = "00700", indicator: str = "年度"
) -> pd.DataFrame:
if indicator == "年度":
key = "zyzb_an"
elif indicator == "报告期":
key = "zyzb_abgq"
else:
raise Exception("非法的关键字!", indicator)
url = f"http://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZYZB?code={symbol}"
r = requests.get(url)
temp_df = pd.DataFrame.from_records(eval(r.text)["data"][key])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df["周期"] = pd.to_datetime(temp_df["每股指标"], format="%y-%m-%d").dt.date
temp_df = temp_df.drop("每股指标", axis=1)
temp_df = temp_df[
[
"周期",
"基本每股收益(元)",
"稀释每股收益(元)",
"TTM每股收益(元)",
"每股净资产(元)",
"每股经营现金流(元)",
"每股营业收入(元)",
"成长能力指标",
"营业总收入(元)",
"毛利润",
"归母净利润",
"营业总收入同比增长(%)",
"毛利润同比增长(%)",
"归母净利润同比增长(%)",
"营业总收入滚动环比增长(%)",
"毛利润滚动环比增长(%)",
"归母净利润滚动环比增长(%)",
"盈利能力指标",
"平均净资产收益率(%)",
"年化净资产收益率(%)",
"总资产净利率(%)",
"毛利率(%)",
"净利率(%)",
"年化投资回报率(%)",
"盈利质量指标",
"所得税/利润总额(%)",
"经营现金流/营业收入(%)",
"财务风险指标",
"资产负债率(%)",
"流动负债/总负债(%)",
"流动比率",
]
]
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
temp_df["周期"] = pd.to_datetime(temp_df["周期"]).dt.date
return temp_df
| 18,635 |
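A short sketch for stock_financial_hk_analysis_indicator_em above, under the same Eastmoney-endpoint assumption; the column names below are taken from the selection list in the function body.

from akshare.stock_fundamental.stock_finance_hk import stock_financial_hk_analysis_indicator_em

# Key indicators per fiscal year; pass indicator="报告期" for per-period rows.
df = stock_financial_hk_analysis_indicator_em(symbol="00700", indicator="年度")
print(df[["周期", "基本每股收益(元)", "毛利率(%)"]].head())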
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_zygc_ym.py
|
stock_zygc_ym
|
(symbol: str = "000001")
|
return big_df
|
益盟-F10-主营构成
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 主营构成
:rtype: pandas.DataFrame
|
益盟-F10-主营构成
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 主营构成
:rtype: pandas.DataFrame
| 13 | 63 |
def stock_zygc_ym(symbol: str = "000001") -> pd.DataFrame:
"""
益盟-F10-主营构成
http://f10.emoney.cn/f10/zbyz/1000001
:param symbol: 股票代码
:type symbol: str
:return: 主营构成
:rtype: pandas.DataFrame
"""
url = f"http://f10.emoney.cn/f10/zygc/{symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_list = [
item.text.strip()
for item in soup.find(attrs={"class": "swlab_t"}).find_all("li")
]
big_df = pd.DataFrame()
for i, item in enumerate(year_list, 2):
temp_df = pd.read_html(r.text, header=0)[i]
temp_df.columns = [
"分类方向",
"分类",
"营业收入",
"营业收入-同比增长",
"营业收入-占主营收入比",
"营业成本",
"营业成本-同比增长",
"营业成本-占主营成本比",
"毛利率",
"毛利率-同比增长",
]
temp_df["报告期"] = item
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[
[
"报告期",
"分类方向",
"分类",
"营业收入",
"营业收入-同比增长",
"营业收入-占主营收入比",
"营业成本",
"营业成本-同比增长",
"营业成本-占主营成本比",
"毛利率",
"毛利率-同比增长",
]
]
return big_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_zygc_ym.py#L13-L63
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 17.647059 |
[
9,
10,
11,
12,
17,
18,
19,
20,
32,
33,
35,
50
] | 23.529412 | false | 30 | 51 | 3 | 76.470588 | 6 |
def stock_zygc_ym(symbol: str = "000001") -> pd.DataFrame:
url = f"http://f10.emoney.cn/f10/zygc/{symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_list = [
item.text.strip()
for item in soup.find(attrs={"class": "swlab_t"}).find_all("li")
]
big_df = pd.DataFrame()
for i, item in enumerate(year_list, 2):
temp_df = pd.read_html(r.text, header=0)[i]
temp_df.columns = [
"分类方向",
"分类",
"营业收入",
"营业收入-同比增长",
"营业收入-占主营收入比",
"营业成本",
"营业成本-同比增长",
"营业成本-占主营成本比",
"毛利率",
"毛利率-同比增长",
]
temp_df["报告期"] = item
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[
[
"报告期",
"分类方向",
"分类",
"营业收入",
"营业收入-同比增长",
"营业收入-占主营收入比",
"营业成本",
"营业成本-同比增长",
"营业成本-占主营成本比",
"毛利率",
"毛利率-同比增长",
]
]
return big_df
| 18,636 |
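A minimal sketch for stock_zygc_ym above, assuming the emoney.cn page is reachable; the frame stacks one block of rows per "报告期", so filtering on a single period recovers one composition table.

from akshare.stock_fundamental.stock_zygc_ym import stock_zygc_ym

# Main-business composition across reporting periods.
df = stock_zygc_ym(symbol="000001")
# Inspect a single period (the first one that appears in the frame).
first_period = df["报告期"].iloc[0]
print(df[df["报告期"] == first_period][["分类方向", "分类", "营业收入"]])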
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_finance.py
|
stock_financial_report_sina
|
(
stock: str = "600004", symbol: str = "现金流量表"
)
|
return temp_df
|
新浪财经-财务报表-三大报表
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_BalanceSheet/stockid/600004/ctrl/part/displaytype/4.phtml
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol:
:return: 新浪财经-财务报表-三大报表
:rtype: pandas.DataFrame
|
新浪财经-财务报表-三大报表
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_BalanceSheet/stockid/600004/ctrl/part/displaytype/4.phtml
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol:
:return: 新浪财经-财务报表-三大报表
:rtype: pandas.DataFrame
| 21 | 49 |
def stock_financial_report_sina(
stock: str = "600004", symbol: str = "现金流量表"
) -> pd.DataFrame:
"""
新浪财经-财务报表-三大报表
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_BalanceSheet/stockid/600004/ctrl/part/displaytype/4.phtml
:param stock: 股票代码
:type stock: str
:param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
:type symbol:
:return: 新浪财经-财务报表-三大报表
:rtype: pandas.DataFrame
"""
if symbol == "资产负债表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_BalanceSheet/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 资产负债表
elif symbol == "利润表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_ProfitStatement/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 利润表
elif symbol == "现金流量表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_CashFlow/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 现金流量表
r = requests.get(url)
temp_df = pd.read_table(BytesIO(r.content), encoding="gb2312", header=None).iloc[
:, :-2
]
temp_df = temp_df.T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
temp_df.index.name = None
temp_df.columns.name = None
return temp_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_finance.py#L21-L49
| 25 |
[
0
] | 3.448276 |
[
13,
14,
15,
16,
17,
18,
19,
20,
23,
24,
25,
26,
27,
28
] | 48.275862 | false | 6.896552 | 29 | 4 | 51.724138 | 8 |
def stock_financial_report_sina(
stock: str = "600004", symbol: str = "现金流量表"
) -> pd.DataFrame:
if symbol == "资产负债表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_BalanceSheet/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 资产负债表
elif symbol == "利润表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_ProfitStatement/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 利润表
elif symbol == "现金流量表":
url = f"http://money.finance.sina.com.cn/corp/go.php/vDOWN_CashFlow/displaytype/4/stockid/{stock}/ctrl/all.phtml" # 现金流量表
r = requests.get(url)
temp_df = pd.read_table(BytesIO(r.content), encoding="gb2312", header=None).iloc[
:, :-2
]
temp_df = temp_df.T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
temp_df.index.name = None
temp_df.columns.name = None
return temp_df
| 18,637 |
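A usage sketch for stock_financial_report_sina above, assuming the Sina download endpoint is still live; one call fetches one statement type, and after the transpose each row is a report date.

from akshare.stock_fundamental.stock_finance import stock_financial_report_sina

# Fetch two of the three statements for the same stock.
df_cash = stock_financial_report_sina(stock="600004", symbol="现金流量表")
df_income = stock_financial_report_sina(stock="600004", symbol="利润表")
print(df_cash.iloc[:3, :5])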
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_finance.py
|
stock_financial_abstract
|
(stock: str = "600004")
|
return data_df
|
新浪财经-财务报表-财务摘要
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/600004.phtml
:param stock: 股票代码
:type stock: str
:return: 新浪财经-财务报表-财务摘要
:rtype: pandas.DataFrame
|
新浪财经-财务报表-财务摘要
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/600004.phtml
:param stock: 股票代码
:type stock: str
:return: 新浪财经-财务报表-财务摘要
:rtype: pandas.DataFrame
| 52 | 72 |
def stock_financial_abstract(stock: str = "600004") -> pd.DataFrame:
"""
新浪财经-财务报表-财务摘要
https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/600004.phtml
:param stock: 股票代码
:type stock: str
:return: 新浪财经-财务报表-财务摘要
:rtype: pandas.DataFrame
"""
url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/{stock}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[13].iloc[:, :2]
big_df = pd.DataFrame()
for i in range(0, len(temp_df), 12):
truncated_df = temp_df.iloc[i : i + 11, 1]
big_df = pd.concat(
[big_df, truncated_df.reset_index(drop=True)], axis=1, ignore_index=True
)
data_df = big_df.T
data_df.columns = temp_df.iloc[:11, 0].tolist()
return data_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_finance.py#L52-L72
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 42.857143 |
[
9,
10,
11,
12,
13,
14,
15,
18,
19,
20
] | 47.619048 | false | 6.896552 | 21 | 2 | 52.380952 | 6 |
def stock_financial_abstract(stock: str = "600004") -> pd.DataFrame:
url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/{stock}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[13].iloc[:, :2]
big_df = pd.DataFrame()
for i in range(0, len(temp_df), 12):
truncated_df = temp_df.iloc[i : i + 11, 1]
big_df = pd.concat(
[big_df, truncated_df.reset_index(drop=True)], axis=1, ignore_index=True
)
data_df = big_df.T
data_df.columns = temp_df.iloc[:11, 0].tolist()
return data_df
| 18,638 |
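A short sketch for stock_financial_abstract above, assuming the Sina page layout still matches; the columns come from the first 11 labels of the source table and each row is one reporting period.

from akshare.stock_fundamental.stock_finance import stock_financial_abstract

df = stock_financial_abstract(stock="600004")
print(df.columns.tolist())
print(df.head(3))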
akfamily/akshare
|
087025d8d6f799b30ca114013e82c1ad22dc9294
|
akshare/stock_fundamental/stock_finance.py
|
stock_financial_analysis_indicator
|
(symbol: str = "600004")
|
return out_df
|
新浪财经-财务分析-财务指标
https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/600004/ctrl/2019/displaytype/4.phtml
:param symbol: 股票代码
:type symbol: str
:return: 新浪财经-财务分析-财务指标
:rtype: pandas.DataFrame
|
新浪财经-财务分析-财务指标
https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/600004/ctrl/2019/displaytype/4.phtml
:param symbol: 股票代码
:type symbol: str
:return: 新浪财经-财务分析-财务指标
:rtype: pandas.DataFrame
| 75 | 124 |
def stock_financial_analysis_indicator(symbol: str = "600004") -> pd.DataFrame:
"""
新浪财经-财务分析-财务指标
https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/600004/ctrl/2019/displaytype/4.phtml
:param symbol: 股票代码
:type symbol: str
:return: 新浪财经-财务分析-财务指标
:rtype: pandas.DataFrame
"""
url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{symbol}/ctrl/2020/displaytype/4.phtml"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_context = soup.find(attrs={"id": "con02-1"}).find("table").find_all("a")
year_list = [item.text for item in year_context]
out_df = pd.DataFrame()
for year_item in tqdm(year_list, leave=False):
url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{symbol}/ctrl/{year_item}/displaytype/4.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[12].iloc[:, :-1]
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
big_df = pd.DataFrame()
indicator_list = ["每股指标", "盈利能力", "成长能力", "营运能力", "偿债及资本结构", "现金流量", "其他指标"]
for i in range(len(indicator_list)):
if i == 6:
inner_df = temp_df[
temp_df.loc[
temp_df.iloc[:, 0].str.find(indicator_list[i]) == 0, :
].index[0] :
].T
else:
inner_df = temp_df[
temp_df.loc[temp_df.iloc[:, 0].str.find(indicator_list[i]) == 0, :]
.index[0] : temp_df.loc[
temp_df.iloc[:, 0].str.find(indicator_list[i + 1]) == 0, :
]
.index[0]
- 1
].T
inner_df = inner_df.reset_index(drop=True)
big_df = pd.concat([big_df, inner_df], axis=1)
big_df.columns = big_df.iloc[0, :].tolist()
big_df = big_df.iloc[1:, :]
big_df.index = temp_df.columns.tolist()[1:]
out_df = pd.concat([out_df, big_df])
out_df.dropna(inplace=True)
out_df.reset_index(inplace=True)
out_df.rename(columns={'index': '日期'}, inplace=True)
return out_df
|
https://github.com/akfamily/akshare/blob/087025d8d6f799b30ca114013e82c1ad22dc9294/project25/akshare/stock_fundamental/stock_finance.py#L75-L124
| 25 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 18 |
[
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
31,
39,
40,
41,
42,
43,
44,
46,
47,
48,
49
] | 56 | false | 6.896552 | 50 | 5 | 44 | 6 |
def stock_financial_analysis_indicator(symbol: str = "600004") -> pd.DataFrame:
url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{symbol}/ctrl/2020/displaytype/4.phtml"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
year_context = soup.find(attrs={"id": "con02-1"}).find("table").find_all("a")
year_list = [item.text for item in year_context]
out_df = pd.DataFrame()
for year_item in tqdm(year_list, leave=False):
url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{symbol}/ctrl/{year_item}/displaytype/4.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[12].iloc[:, :-1]
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
big_df = pd.DataFrame()
indicator_list = ["每股指标", "盈利能力", "成长能力", "营运能力", "偿债及资本结构", "现金流量", "其他指标"]
for i in range(len(indicator_list)):
if i == 6:
inner_df = temp_df[
temp_df.loc[
temp_df.iloc[:, 0].str.find(indicator_list[i]) == 0, :
].index[0] :
].T
else:
inner_df = temp_df[
temp_df.loc[temp_df.iloc[:, 0].str.find(indicator_list[i]) == 0, :]
.index[0] : temp_df.loc[
temp_df.iloc[:, 0].str.find(indicator_list[i + 1]) == 0, :
]
.index[0]
- 1
].T
inner_df = inner_df.reset_index(drop=True)
big_df = pd.concat([big_df, inner_df], axis=1)
big_df.columns = big_df.iloc[0, :].tolist()
big_df = big_df.iloc[1:, :]
big_df.index = temp_df.columns.tolist()[1:]
out_df = pd.concat([out_df, big_df])
out_df.dropna(inplace=True)
out_df.reset_index(inplace=True)
out_df.rename(columns={'index': '日期'}, inplace=True)
return out_df
| 18,639 |
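A usage sketch for stock_financial_analysis_indicator above, assuming the Sina pages are reachable; the function issues one request per listed year (with a tqdm progress bar), so it can be slow for long-listed stocks.

from akshare.stock_fundamental.stock_finance import stock_financial_analysis_indicator

df = stock_financial_analysis_indicator(symbol="600004")
# "日期" is restored from the index in the final step of the function.
print(df[["日期"]].head())
print(df.shape)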