query | language | code | url
---|---|---|---|
parse json file
|
python
|
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L46-L60
|
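A minimal standalone sketch of the same pattern (standard library only; the helper name is ours, not py-jsonq's): validate the extension, load the file, and keep a deep copy so later queries cannot mutate the raw data.

import copy
import json
import os

def load_json_file(file_path):
    """Load a .json file; return (raw_data, working_copy). Illustrative helper."""
    if not file_path or os.path.splitext(file_path)[1] != '.json':
        raise IOError('Invalid Json file')
    with open(file_path) as json_file:
        raw_data = json.load(json_file)
    # Deep copy so work on the copy leaves the raw data intact
    return raw_data, copy.deepcopy(raw_data)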
parse json file
|
python
|
def json(self, json_file):
"""
Reads and parses the input of a json file handler or file.
Json files are parsed differently depending on whether the root is a dictionary or an array.
1) If the json's root is a dictionary, these are parsed into a sequence of (Key, Value)
pairs
2) If the json's root is an array, these are parsed into a sequence
of entries
>>> seq.json('examples/users.json').first()
[u'sarah', {u'date_created': u'08/08', u'news_email': True, u'email': u'sarah@gmail.com'}]
:param json_file: path or file containing json content
:return: Sequence wrapping the json file
"""
if isinstance(json_file, str):
file_open = get_read_function(json_file, self.disable_compression)
input_file = file_open(json_file)
json_input = jsonapi.load(input_file)
elif hasattr(json_file, 'read'):
json_input = jsonapi.load(json_file)
else:
raise ValueError('json_file must be a file path or implement the iterator interface')
if isinstance(json_input, list):
return self(json_input)
else:
return self(six.viewitems(json_input))
|
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/streams.py#L174-L204
|
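A standalone sketch of the root-type dispatch described in the docstring (our own helper, not PyFunctional's API): a dict root yields (key, value) pairs, a list root yields its entries.

import json

def json_rows(path):
    # Illustrative: mirror the dict-vs-list handling described above
    with open(path) as f:
        root = json.load(f)
    if isinstance(root, list):
        return list(root)          # array root: sequence of entries
    return list(root.items())      # dict root: sequence of (key, value) pairs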
parse json file
|
python
|
def parse_file(self, sourcepath):
"""Parse single JSON object into a LogData object"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonstr = logfile.read()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = json.loads(jsonstr)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/sojson.py#L41-L56
|
parse json file
|
python
|
def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/linejson.py#L41-L59
|
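The same object-per-line pattern as a self-contained sketch (json.loads per line stands in for the parser's parse_line; the tzone handling follows the snippet above):

import json

def parse_line_json(sourcepath, tzone=None):
    """Parse an object-per-line JSON file into {'entries': [...]}. Sketch only."""
    data = {'entries': []}
    with open(sourcepath, 'r') as logfile:
        for line in logfile:
            if line.strip():
                data['entries'].append(json.loads(line))
    if tzone:
        for e in data['entries']:
            e['tzone'] = tzone
    return data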
parse json file
|
python
|
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, (dict, list)):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L32-L44
|
parse json file
|
python
|
def parse_json(json_file):
"""Parse a whole json record from the given file.
Return None if the json file does not exist or an exception occurs.
Args:
json_file (str): File path to be parsed.
Returns:
A dict of json info.
"""
if not os.path.exists(json_file):
return None
try:
with open(json_file, "r") as f:
info_str = f.readlines()
info_str = "".join(info_str)
json_info = json.loads(info_str)
return unicode2str(json_info)
except BaseException as e:
logging.error(e.message)
return None
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L33-L55
|
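The snippet above is Python 2 (e.message, unicode2str); a hedged Python 3 variant of the same contract (return None on a missing file or any failure) might look like:

import json
import logging
import os

def parse_json_py3(json_file):
    """Return the parsed dict, or None if the file is missing or unreadable."""
    if not os.path.exists(json_file):
        return None
    try:
        with open(json_file, 'r') as f:
            return json.load(f)
    except Exception as e:
        logging.error(str(e))
        return None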
parse json file
|
python
|
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
json_data = self._cache.get("json")
if json_data is None:
try:
self._cache["json"] = json_data = core.parse_json(req.body, req.charset)
except json.JSONDecodeError as e:
if e.doc == "":
return core.missing
else:
return self.handle_invalid_json_error(e, req)
if json_data is None:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True)
|
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/pyramidparser.py#L56-L69
|
parse json file
|
python
|
def json2file(data, filename, encoding='utf-8'):
"""
write json in canonical json format
"""
with codecs.open(filename, "w", encoding=encoding) as f:
json.dump(data, f, ensure_ascii=False, indent=4, sort_keys=True)
|
https://github.com/cnschema/cdata/blob/893e2e1e27b61c8551c8b5f5f9bf05ec61490e23/cdata/core.py#L71-L76
|
parse json file
|
python
|
def _parse_json(s):
'''
Parse json string into JsonDict.
>>> r = _parse_json(r'{"name":"Michael","score":95}')
>>> r.name
u'Michael'
>>> r['score']
95
'''
return json.loads(s, object_hook=lambda pairs: JsonDict(pairs.iteritems()))
|
https://github.com/michaelliao/sinaweibopy/blob/0f19dd71c1fbd16ee539620c7e9e986887f5c665/snspy.py#L77-L87
|
parse json file
|
python
|
def parse_json(s, **kwargs):
"""Parse a string into a (nbformat, dict) tuple."""
d = json.loads(s, **kwargs)
nbf = d.get('nbformat', 1)
nbm = d.get('nbformat_minor', 0)
return nbf, nbm, d
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/current.py#L48-L53
|
parse json file
|
python
|
def json_to_file(data, filename, pretty=False):
'''Dump JSON data to a file'''
kwargs = dict(indent=4) if pretty else {}
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
dump = json.dumps(data, **kwargs)
with open(filename, 'wb') as f:
f.write(dump.encode('utf-8'))
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L24-L32
|
parse json file
|
python
|
def read_json_file(path):
"""
Reads and returns the data from the json file at the given path.
Parameters:
path (str): Path to read
Returns:
dict,list: The read json as dict/list.
"""
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
|
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/jsonfile.py#L20-L34
|
parse json file
|
python
|
def get_json(filename):
""" Return a json value of the exif
Get a filename and return a JSON object
Arguments:
filename {string} -- your filename
Returns:
[JSON] -- Return a JSON object
"""
check_if_this_file_exist(filename)
# Run exiftool and capture its JSON output
filename = os.path.abspath(filename)
s = command_line(['exiftool', '-G', '-j', '-sort', filename])
if s:
#convert bytes to string
s = s.decode('utf-8').rstrip('\r\n')
return json.loads(s)
else:
return s
|
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L100-L121
|
parse json file
|
python
|
def from_file(file_path) -> dict:
""" Load JSON file """
with io.open(file_path, 'r', encoding='utf-8') as json_stream:
return Json.parse(json_stream, True)
|
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/json.py#L55-L58
|
parse json file
|
python
|
def parse_json_feed_file(filename: str) -> JSONFeed:
"""Parse a JSON feed from a local json file."""
with open(filename) as f:
try:
root = json.load(f)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root)
|
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L205-L213
|
parse json file
|
python
|
def _parse(self, content):
"""
Parse the request content into Python data.
@param content: Content of the request.
@raise ParseError:
"""
if content:
stream = BytesIO(str(content))
data = json.loads(stream.getvalue())
return data
|
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiGenericClient.py#L233-L246
|
parse json file
|
python
|
def parse_json(content):
"""Tries to parse a string into a json object.
This also performs a trim of all values, recursively removing leading and trailing whitespace.
Parameters
----------
content: A JSON format string.
Returns
-------
obj:
The object represented by the json string.
Raises
------
InvalidContent
If the content is not a valid json string.
"""
try:
json_content = json.loads(content)
return _recursive_strip(json_content)
except json.JSONDecodeError:
raise InvalidContent("content is not a json string.")
|
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L294-L317
|
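The _recursive_strip helper is not shown above; a plausible reconstruction (hypothetical, for illustration only) trims strings recursively through lists and dicts:

def recursive_strip(value):
    # Hypothetical stand-in for tibiapy's _recursive_strip
    if isinstance(value, str):
        return value.strip()
    if isinstance(value, list):
        return [recursive_strip(v) for v in value]
    if isinstance(value, dict):
        return {k: recursive_strip(v) for k, v in value.items()}
    return value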
parse json file
|
python
|
def _parse_json(s):
' parse str into JsonDict '
def _obj_hook(pairs):
' convert json object to python object '
o = JsonDict()
for k, v in pairs.iteritems():
o[str(k)] = v
return o
return json.loads(s, object_hook=_obj_hook)
|
https://github.com/michaelliao/sinaweibopy/blob/0f19dd71c1fbd16ee539620c7e9e986887f5c665/weibo.py#L46-L55
|
parse json file
|
python
|
def parse(self):
""" parses args json """
data = json.loads(sys.argv[1])
self.config_path = self.decode(data['config_path'])
self.subject = self.decode(data['subject'])
self.text = self.decode(data['text'])
self.html = self.decode(data['html'])
self.send_as_one = data['send_as_one']
if 'files' in data:
self.parse_files(data['files'])
self.ccs = data['ccs']
self.addresses = data['addresses']
if not self.addresses:
raise ValueError(
'At least one email address is required to send an email')
|
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/scripts/sendemail.py#L47-L61
|
parse json file
|
python
|
def read_json(fh, byteorder, dtype, count, offsetsize):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
log.warning('read_json: invalid JSON')
|
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8141-L8147
|
parse json file
|
python
|
def from_json(data):
"""
Convert JSON into an in-memory file storage.
Args:
data (str): valid JSON with path and filenames and
the base64 encoding of the file content.
Returns:
InMemoryFiles: in memory file storage
"""
memfiles = InMemoryFiles()
memfiles.files = json.loads(data)
return memfiles
|
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L84-L97
|
parse json file
|
python
|
def read_json_from_file(filename):
"""
Import the JSON data from target file.
:param str filename: Target File
:return dict: JSON data
"""
logger_jsons.info("enter read_json_from_file")
d = OrderedDict()
try:
# Load and decode
d = demjson.decode_file(filename, decode_float=float)
logger_jsons.info("successful read from json file")
except FileNotFoundError:
# Didn't find a jsonld file. Maybe it's a json file instead?
try:
d = demjson.decode_file(os.path.splitext(filename)[0] + '.json', decode_float=float)
except FileNotFoundError as e:
# No json or jsonld file. Exit
print("Error: jsonld file not found: {}".format(filename))
logger_jsons.debug("read_json_from_file: FileNotFound: {}, {}".format(filename, e))
except Exception:
print("Error: unable to read jsonld file")
if d:
d = rm_empty_fields(d)
logger_jsons.info("exit read_json_from_file")
return d
|
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/jsons.py#L51-L77
|
parse json file
|
python
|
def _parse(self, stream):
"""Parse a JSON BUILD file.
Args:
stream: file-like object containing the JSON build data
"""
builddata = json.load(stream)
log.debug('This is a JSON build file.')
if 'targets' not in builddata:
log.warn('Warning: No targets defined here.')
return
for tdata in builddata['targets']:
# TODO: validate name
target = address.new(target=tdata.pop('name'),
repo=self.target.repo,
path=self.target.path)
# Duplicate target definition? Uh oh.
if target in self.node and 'target_obj' in self.node[target]:
raise error.ButcherError(
'Target is defined more than once: %s', target)
rule_obj = targets.new(name=target,
ruletype=tdata.pop('type'),
**tdata)
log.debug('New target: %s', target)
self.add_node(target, {'target_obj': rule_obj})
# dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla"
for dep in rule_obj.composed_deps() or []:
d_target = address.new(dep)
if not d_target.repo: # ":blabla"
d_target.repo = self.target.repo
if d_target.repo == self.target.repo and not d_target.path:
d_target.path = self.target.path
if d_target not in self.nodes():
self.add_node(d_target)
log.debug('New dep: %s -> %s', target, d_target)
self.add_edge(target, d_target)
|
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile.py#L135-L177
|
parse json file
|
python
|
def parse_json(data, name="JSON", exception=PluginError, schema=None):
"""Wrapper around json.loads.
Wraps errors in custom exception with a snippet of the data in the message.
"""
try:
json_data = json.loads(data)
except ValueError as err:
snippet = repr(data)
if len(snippet) > 35:
snippet = snippet[:35] + " ..."
else:
snippet = data
raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet))
if schema:
json_data = schema.validate(json_data, name=name, exception=exception)
return json_data
|
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/utils/__init__.py#L52-L71
|
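A usage sketch for the wrapper above, on hypothetical inputs (the default exception is streamlink's PluginError):

data = parse_json('{"streams": []}', name="player config")
assert data == {"streams": []}
try:
    parse_json("not json", name="player config")
except Exception as err:  # PluginError by default
    print(err)  # Unable to parse player config: ... (not json)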
parse json file
|
python
|
def parse(self, f):
'''parse a JSON stream.
:type f: file
:param f: stream to parse JSON from
:raises YajlError: When invalid JSON in input stream found
'''
self._listener.parse_start()
while len(f):
data = f.read(self._buffer_size).encode('utf-8')
status = yajl.yajl_parse(self._handler, data, len(data))
self._listener.parse_buf()
if status != OK.value:
if status == CLIENT_CANCELLED.value:
if self._exc_info:
exc_info = self._exc_info
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
else:
raise YajlError("Client probably cancelled callback")
else:
yajl.yajl_get_error.restype = c_char_p
error = yajl.yajl_get_error(self._handler, 1, data, len(data))
raise YajlError(error)
if not data: return
|
https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/yajl/parse.py#L229-L253
|
parse json file
|
python
|
def _parse_json(self, doc, exactly_one):
"""
Parse JSON response body.
"""
if doc.get('error'):
raise GeocoderServiceError(doc['error']['message'])
try:
places = doc['response']['GeoObjectCollection']['featureMember']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
def parse_code(place):
"""
Parse each record.
"""
try:
place = place['GeoObject']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
longitude, latitude = [
float(_) for _ in place['Point']['pos'].split(' ')
]
name_elements = ['name', 'description']
location = ', '.join([place[k] for k in name_elements if place.get(k)])
return Location(location, (latitude, longitude), place)
if exactly_one:
try:
return parse_code(places[0])
except IndexError:
return None
else:
return [parse_code(place) for place in places]
|
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/yandex.py#L186-L222
|
parse json file
|
python
|
def _parse(self, data):
"""Parse API response.
:param dict data: Parsed JSON response from API 'data' key.
"""
super(Image, self)._parse(data)
self.height = data['height']
self.type = data['type']
self.width = data['width']
|
https://github.com/Robpol86/sphinxcontrib-imgur/blob/5c178481d645147d10acb096793eda41c12c57af/sphinxcontrib/imgur/imgur_api.py#L156-L164
|
parse json file
|
python
|
def _parse_file(self, file):
""" Parses the given file-like object.
"""
case = Case()
file.seek(0)
line = file.readline().split()
if line[0] != "function":
logger.error("Invalid data file header.")
return case
if line[1] != "mpc":
self._is_struct = False
base = ""
else:
base = "mpc."
case.name = line[-1]
for line in file:
if line.startswith("%sbaseMVA" % base):
case_data = line.rstrip(";\n").split()
case.base_mva = float(case_data[-1])
elif line.startswith("%sbus" % base):
self._parse_buses(case, file)
elif line.startswith("%sgencost" % base):
self._parse_gencost(case, file)
elif line.startswith("%sgen" % base):
self._parse_generators(case, file)
elif line.startswith("%sbranch" % base):
self._parse_branches(case, file)
return case
|
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/matpower.py#L95-L125
|
parse json file
|
python
|
def read_json(filename, mode='r'):
'''read_json reads in a json file and returns
the data structure as dict.
'''
with open(filename, mode) as filey:
data = json.load(filey)
return data
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/utils/fileio.py#L323-L329
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses an *NO2Index* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: an *NO2Index* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
# -- reference time (strip away Z and T on ISO8601 format)
t = d['time'].replace('Z', '+00').replace('T', ' ')
reference_time = timeformatutils._ISO8601_to_UNIXtime(t)
# -- reception time (now)
reception_time = timeutils.now('unix')
# -- location
lon = float(d['location']['longitude'])
lat = float(d['location']['latitude'])
place = location.Location(None, lon, lat, None)
# -- NO2 samples
no2_samples = [dict(label=key,
precision=d['data'][key]['precision'],
value=d['data'][key]['value']) for key in d['data']]
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to parse NO2Index']))
return no2index.NO2Index(reference_time, place, None, no2_samples,
reception_time)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/pollutionapi30/parsers.py#L73-L113
|
parse json file
|
python
|
def read_json(file_path):
""" Read in a json file and return a dictionary representation """
try:
with open(file_path, 'r') as f:
config = json_tricks.load(f)
except ValueError:
print(' '+'!'*58)
print(' Whoops! Looks like the JSON syntax is not valid in:')
print(' {}'.format(file_path))
print(' Note: commonly this is a result of having a trailing comma \n in the file')
print(' '+'!'*58)
raise
return config
|
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L136-L149
|
parse json file
|
python
|
def parse(self, data, lexer=None, *args, **kwargs):
"""Parse the input JSON data string into a python data structure.
Args:
data: An input data string
lexer: An optional ply.lex instance that overrides the default lexer.
Returns:
A python dict or list representing the input JSON data.
"""
if lexer is None:
lexer = self.lexer
return self.parser.parse(data, lexer=lexer, *args, **kwargs)
|
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L409-L419
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a `pyowm.alertapi30.alert.Alert` instance out of raw JSON data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a `pyowm.alertapi30.alert.Alert` instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
alert_id = d['_id']
t = d['last_update'].split('.')[0].replace('T', ' ') + '+00'
alert_last_update = timeformatutils._ISO8601_to_UNIXtime(t)
alert_trigger_id = d['triggerId']
alert_met_conds = [
dict(current_value=c['current_value']['min'], condition=Condition.from_dict(c['condition']))
for c in d['conditions']
]
alert_coords = d['coordinates']
return Alert(alert_id, alert_trigger_id, alert_met_conds, alert_coords, last_update=alert_last_update)
except ValueError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/alertapi30/parsers.py#L127-L157
|
parse json file
|
python
|
def json_get_data(filename):
"""Get data from json file
"""
with open(filename) as fp:
json_data = json.load(fp)
return json_data
|
https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L51-L58
|
parse json file
|
python
|
def is_json_file(abspath):
"""Parse file extension.
- *.json: uncompressed, utf-8 encode json file
- *.gz: compressed, utf-8 encode json file
"""
abspath = abspath.lower()
fname, ext = os.path.splitext(abspath)
if ext in [".json", ".js"]:
is_json = True
elif ext == ".gz":
is_json = False
elif ext == ".tmp":
return is_json_file(fname)
else:
raise JsonExtError(
"'%s' is not a valid json file. "
"extension has to be '.json' for uncompressed, '.gz' "
"for compressed." % abspath)
return is_json
|
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L49-L68
|
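Illustrative calls for the helper above (behavior as documented; JsonExtError comes from dataIO):

assert is_json_file("data.json") is True       # uncompressed json
assert is_json_file("data.gz") is False        # compressed json
assert is_json_file("data.json.tmp") is True   # recurses on "data.json"
# is_json_file("data.txt") raises JsonExtError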
parse json file
|
python
|
def parse_json(self, data, encoding="utf-8"):
"""
Parse raw response from Overpass service.
:param data: Raw JSON Data
:type data: String or Bytes
:param encoding: Encoding to decode byte string
:type encoding: String
:return: Result object
:rtype: overpy.Result
"""
if isinstance(data, bytes):
data = data.decode(encoding)
data = json.loads(data, parse_float=Decimal)
if "remark" in data:
self._handle_remark_msg(msg=data.get("remark"))
return Result.from_json(data, api=self)
|
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L204-L220
|
parse json file
|
python
|
def read_json(file_or_path):
"""Parse json contents of string or file object or file path and return python nested dict/lists"""
try:
with (open(file_or_path, 'r') if isinstance(file_or_path, (str, bytes)) else file_or_path) as f:
obj = json.load(f)
except IOError:
obj = json.loads(file_or_path)
return obj
|
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L443-L450
|
parse json file
|
python
|
def read_json_file(fpath):
"""
Read a JSON file from ``fpath``; raise an exception if it doesn't exist.
:param fpath: path to file to read
:type fpath: str
:return: deserialized JSON
:rtype: dict
"""
if not os.path.exists(fpath):
raise Exception('ERROR: file %s does not exist.' % fpath)
with open(fpath, 'r') as fh:
raw = fh.read()
res = json.loads(raw)
return res
|
https://github.com/jantman/webhook2lambda2sqs/blob/c80c18d5a908ba8b8ee624dc3a977c633fba2b7c/webhook2lambda2sqs/utils.py#L47-L61
|
parse json file
|
python
|
def _parse_json(self, jstring=None):
"""Parse jstring and return a Python data structure.
'jstring' a string of JSON. May not be None.
Returns a Python data structure.
If jstring couldn't be parsed, raises an APIDataException."""
# Argument error checking.
assert jstring is not None
result = None
try:
result = json.loads(jstring)
except (ValueError) as exception:
msg = 'Unable to convert JSON string to Python data structure.'
raise APIDataException(exception, jstring, msg)
return result
|
https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L972-L992
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses an *Observation* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: an *Observation* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = loads(JSON_string)
# Check if server returned errors: this check overcomes the lack of use
# of HTTP error status codes by the OWM API 2.5. This mechanism is
# supposed to be deprecated as soon as the API fully adopts HTTP for
# conveying errors to the clients
if 'message' in d and 'cod' in d:
if d['cod'] == "404":
print("OWM API: observation data not available - response " \
"payload: " + dumps(d))
return None
else:
raise api_response_error.APIResponseError(
"OWM API: error - response payload: " + dumps(d), d['cod'])
try:
place = location.location_from_dictionary(d)
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to ' \
'read location info from JSON data']))
try:
w = weather.weather_from_dictionary(d)
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to ' \
'read weather info from JSON data']))
current_time = int(round(time()))
return observation.Observation(current_time, place, w)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/parsers/observationparser.py#L25-L67
|
parse json file
|
python
|
def dump_json(token_dict, dump_path):
"""write json data to file
"""
if sys.version > '3':
with open(dump_path, 'w', encoding='utf-8') as output_file:
json.dump(token_dict, output_file, indent=4)
else:
with open(dump_path, 'w') as output_file:
json.dump(token_dict, output_file, indent=4)
|
https://github.com/fbngrm/babelpy/blob/ff305abecddd66aed40c32f0010485cf192e5f17/babelpy/dump.py#L7-L15
|
parse json file
|
python
|
def _parse(res, params, n, api, **kwds):
"""Auxiliary function to download results and parse json."""
cursor = "cursor" in params
if not cursor:
start = params["start"]
if n == 0:
return ""
_json = res.get('search-results', {}).get('entry', [])
# Download the remaining information in chunks
while n > 0:
n -= params["count"]
if cursor:
pointer = res['search-results']['cursor'].get('@next')
params.update({'cursor': pointer})
else:
start += params["count"]
params.update({'start': start})
res = download(url=URL[api], params=params, accept="json", **kwds).json()
_json.extend(res.get('search-results', {}).get('entry', []))
return _json
|
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/classes/search.py#L120-L139
|
parse json file
|
python
|
def to_json(self, filename):
"""
Writes the experimental setup to a JSON file
Parameters
----------
filename : str
Absolute path where to write the JSON file
"""
with open(filename, 'w') as fp:
json.dump(dict(stimuli=self.stimuli, inhibitors=self.inhibitors, readouts=self.readouts), fp)
|
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L130-L140
|
parse json file
|
python
|
def _parse_json(self, doc, exactly_one):
"""
Parse JSON response body.
"""
places = doc.get('geonames', [])
self._raise_for_error(doc)
if not len(places):
return None
def parse_code(place):
"""
Parse each record.
"""
latitude = place.get('lat', None)
longitude = place.get('lng', None)
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
else:
return None
placename = place.get('name')
state = place.get('adminName1', None)
country = place.get('countryName', None)
location = ', '.join(
[x for x in [placename, state, country] if x]
)
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_code(places[0])
else:
return [parse_code(place) for place in places]
|
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/geonames.py#L387-L421
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.measurement.AggregatedMeasurement*
instance out of raw JSON data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.measurement.AggregatedMeasurement*
instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
station_id = d.get('station_id', None)
ts = d.get('date', None)
if ts is not None:
ts = int(ts)
aggregated_on = d.get('type', None)
temp = d.get('temp', dict())
humidity = d.get('humidity', dict())
wind = d.get('wind', dict())
pressure = d.get('pressure', dict())
precipitation = d.get('precipitation', dict())
return AggregatedMeasurement(station_id, ts, aggregated_on, temp=temp,
humidity=humidity, wind=wind,
pressure=pressure, precipitation=precipitation)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/aggregated_measurement_parser.py#L35-L63
|
parse json file
|
python
|
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
if not (req.body and is_json_request(req)):
return core.missing
json_data = req.json
if json_data is None:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True)
|
https://github.com/EndurantDevs/webargs-sanic/blob/8861a3b7d16d43a0b7e6669115eb93b0553f1b63/webargs_sanic/sanicparser.py#L74-L81
|
parse json file
|
python
|
def get_json_request(self):
"""
friendly method to parse a proper json request (i.e.: **application/json** or **text/json**)
:returns: a dict with the parsed data, unless the request data does not contain a valid json string or the content-type does not indicate a json request.
"""
content_type = request.headers.get('Content-Type', 'text/plain')
if content_type not in ('application/json', 'text/json'):
self.log.error('get_json_request() called in a request that does not have a json content-type: %s. Refusing to parse the request data', content_type)
return {}
try:
data = json.loads(request.data)
except ValueError:
self.log.exception(
"Trying to parse json body in the %s to %s",
request.method, request.url,
)
data = {}
return data
|
https://github.com/c0ntrol-x/p4rr0t007/blob/6fe88ec1231a778b9f1d13bc61332581715d646e/p4rr0t007/web.py#L119-L138
|
parse json file
|
python
|
def fromjson(source, *args, **kwargs):
"""
Extract data from a JSON file. The file must contain a JSON array as
the top level object, and each member of the array will be treated as a
row of data. E.g.::
>>> import petl as etl
>>> data = '''
... [{"foo": "a", "bar": 1},
... {"foo": "b", "bar": 2},
... {"foo": "c", "bar": 2}]
... '''
>>> with open('example.json', 'w') as f:
... f.write(data)
...
74
>>> table1 = etl.fromjson('example.json', header=['foo', 'bar'])
>>> table1
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | 1 |
+-----+-----+
| 'b' | 2 |
+-----+-----+
| 'c' | 2 |
+-----+-----+
If your JSON file does not fit this structure, you will need to parse it
via :func:`json.load` and select the array to treat as the data, see also
:func:`petl.io.json.fromdicts`.
.. versionchanged:: 1.1.0
If no `header` is specified, fields will be discovered by sampling keys
from the first `sample` objects in `source`. The header will be
constructed from keys in the order discovered. Note that this
ordering may not be stable, and therefore it may be advisable to specify
an explicit `header` or to use another function like
:func:`petl.transform.headers.sortheader` on the resulting table to
guarantee stability.
"""
source = read_source_from_arg(source)
return JsonView(source, *args, **kwargs)
|
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/json.py#L17-L62
|
parse json file
|
python
|
def parse_files(self, req, name, field):
"""Pull a file from the request."""
files = ((k, v) for k, v in req.POST.items() if hasattr(v, "file"))
return core.get_value(MultiDict(files), name, field)
|
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/pyramidparser.py#L79-L82
|
parse json file
|
python
|
def parse(string, is_file=False, obj=False):
""" Convert a JSON string to dict/object """
try:
if obj is False:
if is_file:
return system_json.load(string)
return system_json.loads(string, encoding='utf8')
else:
if is_file:
return system_json.load(
string,
object_hook=lambda d: namedtuple('j', d.keys())
(*d.values()), encoding='utf8')
return system_json.loads(
string,
object_hook=lambda d: namedtuple('j', d.keys())
(*d.values()), encoding='utf8')
except (Exception, BaseException) as error:
try:
if current_app.config['DEBUG']:
raise error
except RuntimeError as flask_error:
raise error
return None
|
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/json.py#L29-L52
|
parse json file
|
python
|
def parse_files(self, req, name, field):
"""Pull a file from the request."""
return core.get_value(req.FILES, name, field)
|
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/djangoparser.py#L68-L70
|
parse json file
|
python
|
def parse(self, body):
"""Parse JSON request, storing content in object attributes.
Args:
body: str. HTTP request body.
Returns:
self
"""
if isinstance(body, six.string_types):
body = json.loads(body)
# version
version = body['version']
self.version = version
# session
session = body['session']
self.session.new = session['new']
self.session.session_id = session['sessionId']
application_id = session['application']['applicationId']
self.session.application.application_id = application_id
if 'attributes' in session and session['attributes']:
self.session.attributes = session.get('attributes', {})
else:
self.session.attributes = {}
self.session.user.user_id = session['user']['userId']
self.session.user.access_token = session['user'].get('accessToken', 0)
# request
request = body['request']
# launch request
if request['type'] == 'LaunchRequest':
self.request = LaunchRequest()
# intent request
elif request['type'] == 'IntentRequest':
self.request = IntentRequest()
self.request.intent = Intent()
intent = request['intent']
self.request.intent.name = intent['name']
if 'slots' in intent and intent['slots']:
for name, slot in six.iteritems(intent['slots']):
self.request.intent.slots[name] = Slot()
self.request.intent.slots[name].name = slot['name']
self.request.intent.slots[name].value = slot.get('value')
# session ended request
elif request['type'] == 'SessionEndedRequest':
self.request = SessionEndedRequest()
self.request.reason = request['reason']
# common - keep after specific requests to prevent param overwrite
self.request.type = request['type']
self.request.request_id = request['requestId']
self.request.timestamp = request['timestamp']
return self
|
https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L112-L170
|
parse json file
|
python
|
def _read_json(self, path, name):
"""
Load a json into a dictionary from a file.
:param path: path to file
:param name: name of file
:return: dict
"""
with open(os.path.join(path, name), 'r') as fil:
output = json.load(fil)
self.logger.info("Read contents of {}".format(name))
return output
|
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/file/SessionFiles.py#L205-L216
|
parse json file
|
python
|
async def parse_json(self, req: Request, name: str, field: Field) -> typing.Any:
"""Pull a json value from the request."""
json_data = self._cache.get("json")
if json_data is None:
if not (req.body_exists and is_json_request(req)):
return core.missing
try:
json_data = await req.json(loads=json.loads)
except json.JSONDecodeError as e:
if e.doc == "":
return core.missing
else:
return self.handle_invalid_json_error(e, req)
self._cache["json"] = json_data
return core.get_value(json_data, name, field, allow_many_nested=True)
|
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/aiohttpparser.py#L92-L106
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a *COIndex* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a *COIndex* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
# -- reference time (strip away Z and T on ISO8601 format)
t = d['time'].replace('Z', '+00').replace('T', ' ')
reference_time = timeformatutils._ISO8601_to_UNIXtime(t)
# -- reception time (now)
reception_time = timeutils.now('unix')
# -- location
lon = float(d['location']['longitude'])
lat = float(d['location']['latitude'])
place = location.Location(None, lon, lat, None)
# -- CO samples
co_samples = d['data']
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to parse COIndex']))
return coindex.COIndex(reference_time, place, None, co_samples,
reception_time)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/pollutionapi30/parsers.py#L19-L57
|
parse json file
|
python
|
def parse_files(self, req, name, field):
"""Pull a file from the request."""
return get_value(req.files, name, field)
|
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/tornadoparser.py#L118-L120
|
parse json file
|
python
|
def parse(binary, **params):
"""Turns a JSON structure into a python object."""
encoding = params.get('charset', 'UTF-8')
return json.loads(binary, encoding=encoding)
|
https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/content_types/scriba_json.py#L6-L9
|
parse json file
|
python
|
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
|
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L50-L71
|
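A usage sketch for the generator above ('tweets.json' is a hypothetical path): count tweets per screen name.

from collections import Counter

counts = Counter(name for name, text in parse_json('tweets.json'))
print(counts.most_common(5))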
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
id = d.get('ID', None) or d.get('id', None)
external_id = d.get('external_id', None)
lon = d.get('longitude', None)
lat = d.get('latitude', None)
alt = d.get('altitude', None)
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
name = d.get('name', None)
rank = d.get('rank', None)
created_at = d.get('created_at', None)
updated_at = d.get('updated_at', None)
return Station(id, created_at, updated_at, external_id, name, lon, lat,
alt, rank)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/station_parser.py#L34-L63
|
parse json file
|
python
|
def _parse(self, data):
"""Parse API response.
:param dict data: Parsed JSON response from API 'data' key.
"""
self.description = data['description']
self.in_gallery = data['in_gallery']
self.mod_time = int(time.time())
self.title = data['title']
|
https://github.com/Robpol86/sphinxcontrib-imgur/blob/5c178481d645147d10acb096793eda41c12c57af/sphinxcontrib/imgur/imgur_api.py#L96-L104
|
parse json file
|
python
|
def parse(self):
"""
Parse file specified by constructor.
"""
with open(self.parse_log_path, "r") as f:
    self.parse2(f)
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-r/scripts/build_dataset_from_parse_log.py#L100-L106
|
parse json file
|
python
|
def parse(self, data, mimetype):
"""
Parses a byte array containing a JSON document and returns a Python object.
:param data: The byte array containing a JSON document.
:param MimeType mimetype: The mimetype chose to parse the data.
:return: A Python object.
"""
encoding = mimetype.params.get('charset') or 'utf-8'
return json.loads(data.decode(encoding))
|
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/parsers.py#L35-L44
|
parse json file
|
python
|
def parse_json(filename):
""" Parse a JSON file
First remove comments and then use the json module package
Comments look like :
// ...
or
/*
...
*/
"""
# Regular expression for comments
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
with open(filename) as f:
content = ''.join(f.readlines())
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return json file
return json.loads(content)
|
https://github.com/joeferraro/mm/blob/43dce48a2249faab4d872c228ada9fbdbeec147b/mm/util.py#L294-L321
|
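A round-trip sketch for the comment-stripping parser above ('config.json' is a hypothetical file):

with open('config.json', 'w') as f:
    f.write('// deployment settings\n{"org": "dev" /* target org */}\n')
print(parse_json('config.json'))  # {'org': 'dev'}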
parse json file
|
python
|
def download_json(local_filename, url, clobber=False):
"""Download the given JSON file, and pretty-print before we output it."""
with open(local_filename, 'w') as json_file:
json_file.write(json.dumps(requests.get(url).json(), sort_keys=True, indent=2, separators=(',', ': ')))
|
https://github.com/bibanon/BASC-py4chan/blob/88e4866d73853e1025e549fbbe9744e750522359/examples/example6-download-thread.py#L35-L38
|
parse json file
|
python
|
def parse_json(path):
# type: (str) -> List[FunctionInfo]
"""Deserialize a JSON file containing runtime collected types.
The input JSON is expected to have a list of RawEntry items.
"""
with open(path) as f:
data = json.load(f) # type: List[RawEntry]
result = []
def assert_type(value, typ):
# type: (object, type) -> None
assert isinstance(value, typ), '%s: Unexpected type %r' % (path, type(value).__name__)
def assert_dict_item(dictionary, key, typ):
# type: (Mapping[Any, Any], str, type) -> None
assert key in dictionary, '%s: Missing dictionary key %r' % (path, key)
value = dictionary[key]
assert isinstance(value, typ), '%s: Unexpected type %r for key %r' % (
path, type(value).__name__, key)
assert_type(data, list)
for item in data:
assert_type(item, dict)
assert_dict_item(item, 'path', Text)
assert_dict_item(item, 'line', int)
assert_dict_item(item, 'func_name', Text)
assert_dict_item(item, 'type_comments', list)
for comment in item['type_comments']:
assert_type(comment, Text)
assert_type(item['samples'], int)
info = FunctionInfo(encode(item['path']),
item['line'],
encode(item['func_name']),
[encode(comment) for comment in item['type_comments']],
item['samples'])
result.append(info)
return result
|
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_tools/annotations/parse.py#L96-L133
|
parse json file
|
python
|
def parse_json_path(self, jsonpath):
"""
Parse a jsonpath
Args:
jsonpath: str
Returns: a parsed json path
"""
if jsonpath not in self.parsed:
try:
self.parsed[jsonpath] = self.parser(jsonpath)
except Exception:
self.log("Invalid Json Path: " + jsonpath, "error")
raise InvalidJsonPathError("Invalid Json Path")
return self.parsed[jsonpath]
|
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/etk.py#L85-L103
|
parse json file
|
python
|
def extract_json(fileobj, keywords, comment_tags, options):
"""
Supports: gettext, ngettext. See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.
"""
data = fileobj.read()
json_extractor = JsonExtractor(data)
strings_data = json_extractor.get_lines_data()
for item in strings_data:
messages = [item['content']]
if item.get('funcname') == 'ngettext':
messages.append(item['alt_content'])
yield item['line_number'], item.get('funcname', 'gettext'), tuple(messages), []
|
https://github.com/tigrawap/pybabel-json/blob/432b5726c61afb906bd6892366a6b20e89dc566f/pybabel_json/extractor.py#L109-L121
|
parse json file
|
python
|
def read_json (self, mode='rt', **kwargs):
"""Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
"""
import json
with self.open (mode=mode) as f:
return json.load (f, **kwargs)
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L791-L800
|
parse json file
|
python
|
def _parse_json(self, response, exactly_one=True):
"""
Parse responses as JSON objects.
"""
if not len(response):
return None
if exactly_one:
return self._format_structured_address(response[0])
else:
return [self._format_structured_address(c) for c in response]
|
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/smartystreets.py#L172-L181
|
parse json file
|
python
|
def load_json_file(file, decoder=None):
"""
Load data from json file
:param file: Readable object or path to file
:type file: FileIO | str
:param decoder: Use custom json decoder
:type decoder: T <= DateTimeDecoder
:return: Json data
:rtype: None | int | float | str | list | dict
"""
if decoder is None:
decoder = DateTimeDecoder
if not hasattr(file, "read"):
with io.open(file, "r", encoding="utf-8") as f:
return json.load(f, object_hook=decoder.decode)
return json.load(file, object_hook=decoder.decode)
|
https://github.com/the01/python-flotils/blob/5954712776bb590107e5b2f4362d010bf74f77a1/flotils/loadable.py#L147-L163
|
parse json file
|
python
|
def load_json(filename: str) -> Union[List, Dict]:
"""Load JSON data from a file and return as dict or list.
Defaults to returning empty dict if file is not found.
"""
try:
with open(filename, encoding='utf-8') as fdesc:
return json.loads(fdesc.read())
except FileNotFoundError:
# This is not a fatal error
_LOGGER.debug('JSON file not found: %s', filename)
except ValueError as error:
_LOGGER.exception('Could not parse JSON content: %s', filename)
raise PytradfriError(error)
except OSError as error:
_LOGGER.exception('JSON file reading failed: %s', filename)
raise PytradfriError(error)
return {}
|
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L12-L29
|
parse json file
|
python
|
def _parse(self):
"""
The function for parsing the JSON response to the vars dictionary.
"""
try:
self.vars['status'] = self.json['status']
except (KeyError, ValueError, TypeError):
pass
for v in ['remarks', 'notices']:
try:
self.vars[v] = self.summarize_notices(self.json[v])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars['links'] = self.summarize_links(self.json['links'])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars['events'] = self.summarize_events(self.json['events'])
except (KeyError, ValueError, TypeError):
pass
|
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L419-L456
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a *Forecast* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a *Forecast* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
# Check if server returned errors: this check overcomes the lack of use
# of HTTP error status codes by the OWM API 2.5. This mechanism is
# supposed to be deprecated as soon as the API fully adopts HTTP for
# conveying errors to the clients
if 'message' in d and 'cod' in d:
if d['cod'] == "404":
print("OWM API: data not found - response payload: " + json.dumps(d), d['cod'])
return None
elif d['cod'] != "200":
raise api_response_error.APIResponseError("OWM API: error - response payload: " + json.dumps(d), d['cod'])
try:
place = location.location_from_dictionary(d)
except KeyError:
raise parse_response_error.ParseResponseError(''.join([__name__,
': impossible to read location info from JSON data']))
# Handle the case when no results are found
if 'count' in d and d['count'] == "0":
weathers = []
elif 'cnt' in d and d['cnt'] == 0:
weathers = []
else:
if 'list' in d:
try:
weathers = [weather.weather_from_dictionary(item) \
for item in d['list']]
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to read weather ' \
'info from JSON data'])
)
else:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to read weather ' \
'list from JSON data'])
)
current_time = int(round(time.time()))
return forecast.Forecast(None, current_time, place, weathers)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/parsers/forecastparser.py#L25-L78
|
parse json file
|
python
|
def _load_json_file(self, file, decoder=None):
"""
Load data from json file
:param file: Readable file or path to file
:type file: FileIO | str | unicode
:param decoder: Use custom json decoder
:type decoder: T <= flotils.loadable.DateTimeDecoder
:return: Json data
:rtype: None | int | float | str | list | dict
:raises IOError: Failed to load
"""
try:
res = load_json_file(file, decoder=decoder)
except ValueError as e:
if "{}".format(e) == "No JSON object could be decoded":
raise IOError("Decoding JSON failed")
self.exception("Failed to load from {}".format(file))
raise IOError("Loading file failed")
except:
self.exception("Failed to load from {}".format(file))
raise IOError("Loading file failed")
return res
|
https://github.com/the01/python-flotils/blob/5954712776bb590107e5b2f4362d010bf74f77a1/flotils/loadable.py#L470-L492
|
parse json file
|
python
|
def _parse_request(self):
'''
Parse the request
'''
self.req_method = 'unknown'
self.req_params = {}
self.req_rpc_version = '2.0'
self.req_id = 0
self.data = self.rfile.read(int(self.headers.get('content-length')))
data_dict = json.loads(self.data)
self.req_method = data_dict['method']
self.req_params = decode_data(data_dict['params'])
self.req_rpc_version = data_dict['jsonrpc']
self.req_id = data_dict['id']
|
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/remote/rpc.py#L207-L220
|
parse json file
|
python
|
def parsefile(self, filename):
"""Parse from the file
"""
with open(filename, 'rb') as fd:
return self.parse(fd.read())
|
https://github.com/lipixun/pymime/blob/4762cf2e51ba80c21d872f26b8e408b6a6863d26/src/mime/tools/specxmlparser.py#L44-L48
|
parse json file
|
python
|
def parse_file(self, filename=None, buffer=None, fileobj=None):
"""Completely parse a file, extracting all tags."""
if filename:
self.file = GzipFile(filename, 'rb')
elif buffer:
if hasattr(buffer, 'name'):
self.filename = buffer.name
self.file = buffer
elif fileobj:
if hasattr(fileobj, 'name'):
self.filename = fileobj.name
self.file = GzipFile(fileobj=fileobj)
if self.file:
try:
type = TAG_Byte(buffer=self.file)
if type.value == self.id:
name = TAG_String(buffer=self.file).value
self._parse_buffer(self.file)
self.name = name
self.file.close()
else:
raise MalformedFileError(
"First record is not a Compound Tag")
except StructError as e:
raise MalformedFileError(
"Partial File Parse: file possibly truncated.")
else:
raise ValueError(
"NBTFile.parse_file(): Need to specify either a "
"filename or a file object"
)
|
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/nbt.py#L641-L671
|
parse json file
|
python
|
def parse_multiple_json(json_file, offset=None):
"""Parse multiple json records from the given file.
Seek to the offset as the start point before parsing
if offset is set. Return an empty list if the json file does
not exist or an exception occurs.
Args:
json_file (str): File path to be parsed.
offset (int): Initial seek position of the file.
Returns:
A list of parsed json records.
New offset after parsing.
"""
json_info_list = []
if not os.path.exists(json_file):
return json_info_list, offset
try:
with open(json_file, "r") as f:
if offset:
f.seek(offset)
for line in f:
if line[-1] != "\n":
# Incomplete line
break
json_info = json.loads(line)
json_info_list.append(json_info)
offset += len(line)
except BaseException as e:
logging.error(e.message)
return json_info_list, offset
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L58-L92
|
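A sketch of incremental use: poll the file and parse only the complete lines added since the last call ('progress.jsonl' and the 5-second interval are our assumptions):

import time

offset = 0
while True:
    records, offset = parse_multiple_json('progress.jsonl', offset=offset)
    for record in records:
        print(record)
    time.sleep(5)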
parse json file
|
python
|
def file2json(self, jsonfile=None):
""" Convert entire lte file into json like format
USAGE: 1: kwsdictstr = file2json()
2: kwsdictstr = file2json(jsonfile = 'somefile')
show pretty format with pipeline: | jshon, or | pjson
if jsonfile is defined, dump to defined file before returning json string
:param jsonfile: filename to dump json strings
"""
kwslist = self.detectAllKws()
kwsdict = {}
idx = 0
for kw in sorted(kwslist, key=str.lower):
# print kw
idx += 1
tdict = self.getKwAsDict(kw)
self.rpn2val(tdict)
kwsdict.update(tdict)
if kw not in self.ctrlconf_dict:
ctrlconf = self.getKwCtrlConf(kw, fmt='dict')
if ctrlconf is not None:
self.ctrlconf_dict.update({kw: ctrlconf})
kwsdict.update(self.prestrdict)
ctrlconfdict = {'_epics': self.ctrlconf_dict}  # all epics control config in self.ctrlconf_dict
kwsdict.update(ctrlconfdict)
try:
with open(os.path.expanduser(jsonfile), 'w') as outfile:
json.dump(kwsdict, outfile)
except:
pass
return json.dumps(kwsdict)
|
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L319-L350
|
parse json file
|
python
|
def parse_file(self, inputstring, addhash=True):
"""Parse file code."""
if addhash:
use_hash = self.genhash(False, inputstring)
else:
use_hash = None
return self.parse(inputstring, self.file_parser, {"nl_at_eof_check": True}, {"header": "file", "use_hash": use_hash})
|
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1961-L1967
|
parse json file
|
python
|
def jsonl(self, jsonl_file):
"""
Reads and parses the input of a jsonl file stream or file.
Jsonl formatted files must have a single valid json value on each line which is parsed by
the python json module.
>>> seq.jsonl('examples/chat_logs.jsonl').first()
{u'date': u'10/09', u'message': u'hello anyone there?', u'user': u'bob'}
:param jsonl_file: path or file containing jsonl content
:return: Sequence wrapping jsonl file
"""
if isinstance(jsonl_file, str):
file_open = get_read_function(jsonl_file, self.disable_compression)
input_file = file_open(jsonl_file)
else:
input_file = jsonl_file
return self(input_file).map(jsonapi.loads).cache(delete_lineage=True)
|
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/streams.py#L154-L172
|
parse json file
|
python
|
def read_json(fp, local_files, dir_files, name_bytes):
"""
Read json properties from the zip file
:param fp: a file pointer
:param local_files: the local files structure
:param dir_files: the directory headers
:param name_bytes: the name of the json file to read, as bytes
:return: the json properties as a dictionary, if found
The file pointer will be at a location following the
local file entry after this method.
The local_files and dir_files should be passed from
the results of parse_zip.
"""
if name_bytes in dir_files:
json_pos = local_files[dir_files[name_bytes][1]][1]
json_len = local_files[dir_files[name_bytes][1]][2]
fp.seek(json_pos)
json_properties = fp.read(json_len)
return json.loads(json_properties.decode("utf-8"))
return None
|
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/NDataHandler.py#L300-L322
|
parse json file
|
python
|
    def json(self, *,  # type: ignore
             loads: Callable[[Any], Any] = json.loads) -> Any:
"""Return parsed JSON data.
.. versionadded:: 0.22
"""
return loads(self.data)
|
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/http_websocket.py#L85-L91
|
parse json file
|
python
|
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(None,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles({}, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for index, handler in enumerate(upload_handlers):
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
upload_handlers = upload_handlers[:index + 1]
break
for chunk in chunks:
for index, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[index])
counters[index] += chunk_length
if chunk is None:
break
for index, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[index])
if file_obj:
return DataAndFiles({}, {'file': file_obj})
raise ParseError("FileUpload parse error - "
"none of upload handlers can handle the stream")
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/parsers.py#L126-L189
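For context, a parser like this is normally registered on a view. A sketch using Django REST Framework's own import paths, since wave mirrors its API; the view class, URL kwarg, and output path are hypothetical:

from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.parsers import FileUploadParser

class RawFileUploadView(APIView):
    parser_classes = [FileUploadParser]

    def put(self, request, filename=None):
        uploaded = request.data['file']  # the 'file' key set by DataAndFiles above
        with open('/tmp/' + uploaded.name, 'wb') as out:  # placeholder destination
            for chunk in uploaded.chunks():
                out.write(chunk)
        return Response(status=204)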
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses a *Station* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a *Station* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
name = d['station']['name']
station_ID = d['station']['id']
station_type = d['station']['type']
status = d['station']['status']
lat = d['station']['coord']['lat']
if 'lon' in d['station']['coord']:
lon = d['station']['coord']['lon']
elif 'lng' in d['station']['coord']:
lon = d['station']['coord']['lng']
else:
lon = None
if 'distance' in d:
distance = d['distance']
else:
distance = None
        except KeyError:
            error_msg = ''.join((__name__, ': unable to read JSON data'))
            raise parse_response_error.ParseResponseError(error_msg)
else:
if 'last' in d:
last_weather = weather.weather_from_dictionary(d['last'])
else:
last_weather = None
return station.Station(name, station_ID, station_type, status, lat, lon,
distance, last_weather)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/parsers/stationparser.py#L22-L66
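A short usage sketch with a hand-built payload mirroring the keys the parser reads; the field values are illustrative only, and the enclosing class is assumed to be named StationParser:

import json

raw = json.dumps({
    "station": {
        "name": "Example Station",   # illustrative values only
        "id": 12345,
        "type": 2,
        "status": 10,
        "coord": {"lat": 45.0, "lon": 9.0},
    },
    "distance": 3.2,
})
st = StationParser().parse_JSON(raw)  # class name assumed from the module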
|
parse json file
|
python
|
def parse_json_structure(string_item):
"""
Given a raw representation of a json structure, returns the parsed corresponding data
structure (``JsonRpcRequest`` or ``JsonRpcRequestBatch``)
:param string_item:
:return:
"""
if not isinstance(string_item, str):
raise TypeError("Expected str but got {} instead".format(type(string_item).__name__))
try:
item = json.loads(string_item)
except json.JSONDecodeError:
raise JsonRpcParseError()
if isinstance(item, dict):
return JsonRpcRequest.from_dict(item)
elif isinstance(item, list):
if len(item) == 0:
raise JsonRpcInvalidRequestError()
request_batch = JsonRpcRequestBatch([])
for d in item:
try:
# handles the case of valid batch but with invalid
# requests.
if not isinstance(d, dict):
raise JsonRpcInvalidRequestError()
# is dict, all fine
parsed_entry = JsonRpcRequest.from_dict(d)
except JsonRpcInvalidRequestError:
parsed_entry = GenericResponse.INVALID_REQUEST
request_batch.add_item(parsed_entry)
return request_batch
|
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/structs.py#L195-L229
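A quick sketch of the two input shapes this accepts; the field names follow JSON-RPC 2.0, and whether JsonRpcRequest.from_dict requires exactly these fields is an assumption:

# Single request -> JsonRpcRequest (fields follow JSON-RPC 2.0)
single = parse_json_structure('{"jsonrpc": "2.0", "method": "ping", "id": 1}')

# Batch -> JsonRpcRequestBatch; non-dict entries become invalid-request placeholders
batch = parse_json_structure('[{"jsonrpc": "2.0", "method": "ping", "id": 1}, 42]')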
|
parse json file
|
python
|
def parse(self, file, outfile=None):
"""Parse a BGI (basic gene info) JSON file
"""
file = self._ensure_file(file)
obj = json.load(file)
items = obj['data']
return [self.transform_item(item) for item in items]
|
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L197-L203
|
parse json file
|
python
|
def load_json(filename):
"""Load a json file as a dictionary"""
    try:
        mode = 'rb' if PY2 else 'r'
        with open(filename, mode) as fid:
            data = json.load(fid)
        return data, None
    except Exception as err:
        return None, str(err)
|
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L273-L284
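Usage follows the (data, error) return convention; a minimal sketch with a placeholder path:

data, err = load_json("settings.json")  # placeholder path
if err is not None:
    print("failed to load:", err)
else:
    print(data)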
|
parse json file
|
python
|
def json(cls, res, *args, **kwargs):
"""Parses JSON from a response."""
# if an encoding is already set then use the provided encoding
if res.encoding is None:
res.encoding = cls.determine_json_encoding(res.content[:4])
return parse_json(res.text, *args, **kwargs)
|
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/http_session.py#L98-L103
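determine_json_encoding is not shown here; RFC 4627 describes how the encoding of a BOM-less JSON document can be inferred from the null-byte pattern of its first four bytes, since the first two characters are always ASCII. A sketch of that idea, as an assumption about what the helper does rather than its exact logic:

# Sketch of RFC 4627 encoding detection; not necessarily the library's exact logic.
def guess_json_encoding(first_bytes):
    b = bytearray(first_bytes[:4])
    if len(b) < 4:
        return "utf-8"  # too short to tell; fall back to the default
    nulls = [byte == 0 for byte in b]
    if nulls[:3] == [True, True, True]:
        return "utf-32-be"   # 00 00 00 xx
    if nulls == [False, True, True, True]:
        return "utf-32-le"   # xx 00 00 00
    if nulls[0] and nulls[2]:
        return "utf-16-be"   # 00 xx 00 xx
    if nulls[1] and nulls[3]:
        return "utf-16-le"   # xx 00 xx 00
    return "utf-8"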
|
parse json file
|
python
|
def parse_JSON(self, JSON_string):
"""
Parses an *UVIndex* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: an *UVIndex* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
# -- reference time
reference_time = d['date']
# -- reception time (now)
reception_time = timeutils.now('unix')
# -- location
lon = float(d['lon'])
lat = float(d['lat'])
place = location.Location(None, lon, lat, None)
# -- UV intensity
uv_intensity = float(d['value'])
except KeyError:
raise parse_response_error.ParseResponseError(
''.join([__name__, ': impossible to parse UV Index']))
return uvindex.UVIndex(reference_time, place, uv_intensity,
reception_time)
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/uvindexapi30/parsers.py#L24-L61
|
parse json file
|
python
|
def cache_json(filename):
    """Caches the JSON-serializable output of the function to a given file
    Args:
        filename (str): The filename (sans directory) to store the output
    Returns: decorator, applicable to a function that produces JSON-serializable output
    """
    def cache_decorator(cacheable_function):
        @wraps(cacheable_function)
        def cache_wrapper(*args, **kwargs):
            path = os.path.join(CACHE_DIRECTORY, filename)
            check_create_folder(path)
            if os.path.exists(path):
                with open(path) as infile:
                    return json.load(infile)
            else:
                function_output = cacheable_function(*args, **kwargs)
                with open(path, 'w') as outfile:
                    json.dump(function_output, outfile)
                return function_output
        return cache_wrapper
    return cache_decorator
|
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/fs.py#L10-L32
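Typical usage as a decorator; the function name, cache filename, and return value are placeholders:

@cache_json("expensive_result.json")
def compute_expensive_result():
    # ... slow, JSON-serializable computation ...
    return {"answer": 42}

result = compute_expensive_result()  # first call computes and writes the cache file
result = compute_expensive_result()  # subsequent calls read the cached JSON instead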
|
parse json file
|
python
|
def read_json_file(cls, path):
"""
Read an instance from a JSON-formatted file.
:return: A new instance
"""
with open(path, 'r') as f:
return cls.from_dict(json.load(f))
|
https://github.com/vecnet/vecnet.simulation/blob/3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e/vecnet/simulation/__init__.py#L32-L39
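A sketch of a class satisfying the from_dict contract this classmethod relies on; JsonSerializable is a hypothetical stand-in for the class defining read_json_file, and the fields and path are placeholders:

import json

class JsonSerializable(object):
    """Hypothetical stand-in for the class defining read_json_file above."""
    @classmethod
    def read_json_file(cls, path):
        with open(path, 'r') as f:
            return cls.from_dict(json.load(f))

class SimulationSpec(JsonSerializable):
    def __init__(self, name, steps):
        self.name = name
        self.steps = steps

    @classmethod
    def from_dict(cls, d):
        return cls(d["name"], d["steps"])

spec = SimulationSpec.read_json_file("spec.json")  # placeholder path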
|
parse json file
|
python
|
def loadJSON(self, filename):
"""Adds the data from a JSON file. The file is expected to be in datapoint format::
d = DatapointArray().loadJSON("myfile.json")
"""
with open(filename, "r") as f:
self.merge(json.load(f))
return self
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L86-L93
|
parse json file
|
python
|
def json(self):
"""Custom JSON encoder"""
output = {}
for filename in self.data:
output[filename] = []
for secret_hash in self.data[filename]:
tmp = self.data[filename][secret_hash].json()
del tmp['filename'] # Because filename will map to the secrets
output[filename].append(tmp)
return output
|
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L336-L348
|
parse json file
|
python
|
def parse_file(self, file_or_fname):
"""
Parse a file or a filename
"""
with self._context():
if hasattr(file_or_fname, 'read'):
self.filename = getattr(
file_or_fname, 'name', file_or_fname.__class__.__name__)
self.p.ParseFile(file_or_fname)
else:
self.filename = file_or_fname
with open(file_or_fname, 'rb') as f:
self.p.ParseFile(f)
return self._root
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L826-L839
|
parse json file
|
python
|
def _parse(self, data):
"""Parse API response.
:param dict data: Parsed JSON response from API 'data' key.
:return: Image instances.
:rtype: list.
"""
super(Album, self)._parse(data)
self.cover_id = data['cover']
images = [Image(i['id'], i) for i in data['images']]
self.image_ids[:] = [i.imgur_id for i in images]
return images
|
https://github.com/Robpol86/sphinxcontrib-imgur/blob/5c178481d645147d10acb096793eda41c12c57af/sphinxcontrib/imgur/imgur_api.py#L231-L243
|
parse json file
|
python
|
def json(self, **kwargs):
"""Decodes response as JSON."""
encoding = detect_encoding(self.content[:4])
value = self.content.decode(encoding)
return simplejson.loads(value, **kwargs)
|
https://github.com/davidwtbuxton/notrequests/blob/e48ee6107a58c2f373c33f78e3302608edeba7f3/notrequests.py#L121-L126
|
parse json file
|
python
|
def get_jsonparsed_data(url):
"""Receive the content of ``url``, parse it as JSON and return the
object.
"""
response = urlopen(url)
data = response.read().decode('utf-8')
return json.loads(data)
|
https://github.com/benjaoming/simple-pypi-statistics/blob/de93b94877004ae18a5c8e5b96b213aa38fb29a8/simple_pypi_statistics/api.py#L90-L96
|
parse json file
|
python
|
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None,
dropFieldIfAllNull=None, encoding=None):
"""
Loads a JSON file stream and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
.. note:: Evolving.
:param path: string represents path to the JSON dataset,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
          fields to ``null``. To keep corrupt records, a user can set a string type \
          field named ``columnNameOfCorruptRecord`` in a user-defined schema. If a \
schema does not have the field, it drops corrupt records during parsing. \
When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
:param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
characters (ASCII characters with value less than 32,
including tab and line feed characters) or not.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
:param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
it uses the default value, ``en-US``. For instance, ``locale`` is used while
parsing dates and timestamps.
:param dropFieldIfAllNull: whether to ignore column of all null values or empty
array/struct during schema inference. If None is set, it
uses the default value, ``false``.
:param encoding: allows to forcibly set one of standard basic or extended encoding for
the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
the encoding of input JSON will be detected automatically
when the multiLine option is set to ``true``.
>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale,
dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding)
if isinstance(path, basestring):
return self._df(self._jreader.json(path))
else:
raise TypeError("path can be only a single string")
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L403-L503
|
parse json file
|
python
|
def jsonFile(self, path, schema=None, sampling_ratio=1.0):
"""Loads a text file storing one JSON object per line as a
L{DataFrame}.
Parameters
----------
path: string
The path of the json files to load. Should be Hadoop style
paths (e.g. hdfs://..., file://... etc.).
schema: StructType, optional
If you know the schema of your input data you can specify it. The
schema is specified using Spark SQL's schema format. If not
specified will sample the json records to determine the schema.
Spark SQL's schema format is documented (somewhat) in the
"Programmatically Specifying the Schema" of the Spark SQL
programming guide at: http://bit.ly/sparkSQLprogrammingGuide
        sampling_ratio: float, default=1.0
            Percentage of the records to sample when inferring the schema.
            Defaults to all records for safety, but you may be able to set
            a lower ratio if the same fields are present across records or
            your input is of sufficient size.
Returns
-------
A L{DataFrame} of the contents of the json files.
"""
schema_rdd = self.sql_ctx.jsonFile(path, schema, sampling_ratio)
return self.from_spark_rdd(schema_rdd)
|
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pcontext.py#L171-L196
|