query: string (lengths 9-60)
language: string (1 class)
code: string (lengths 105-25.7k)
url: string (lengths 91-217)
convert json to csv
python
def convert(csv, json, **kwargs):
    '''Convert csv to json.

    csv: filename or file-like object
    json: filename or file-like object

    if csv is '-' or None: stdin is used for input
    if json is '-' or None: stdout is used for output
    '''
    csv_local, json_local = None, None
    try:
        if csv == '-' or csv is None:
            csv = sys.stdin
        elif isinstance(csv, str):
            csv = csv_local = open(csv, 'r')

        if json == '-' or json is None:
            json = sys.stdout
        elif isinstance(json, str):
            json = json_local = open(json, 'w')

        data = load_csv(csv, **kwargs)
        save_json(data, json, **kwargs)
    finally:
        if csv_local is not None:
            csv_local.close()
        if json_local is not None:
            json_local.close()
https://github.com/oplatek/csv2json/blob/f2f95db71ba2ce683fd6d0d3e2f13c9d0a77ceb6/csv2json/__init__.py#L20-L51
convert json to csv
python
def json_to_csv(json_input):
    '''
    Convert simple JSON to CSV
    Accepts a JSON string or JSON object
    '''
    try:
        json_input = json.loads(json_input)
    except (TypeError, ValueError):
        pass  # If loads fails, it's probably already parsed
    headers = set()
    for json_row in json_input:
        headers.update(json_row.keys())
    # io.StringIO replaces the Python 2-only StringIO.StringIO
    csv_io = io.StringIO()
    csv_out = csv.DictWriter(csv_io, headers)
    csv_out.writeheader()
    for json_row in json_input:
        csv_out.writerow(json_row)
    csv_io.seek(0)
    return csv_io.read()
https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/demo_scripts/Utils/json_to_csv.py#L28-L47
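A minimal usage sketch for the snippet above, assuming Python 3 (`io.StringIO` in place of the original `StringIO.StringIO`); the sample records are made up:

import csv, io, json

records = '[{"name": "ada", "lang": "python"}, {"name": "grace", "lang": "cobol"}]'
print(json_to_csv(records))
# Header order follows the set of keys, so it is not deterministic;
# pass a sorted list to csv.DictWriter for stable column order.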
convert json to csv
python
def _to_json(self, sort_keys=False, indent=4):
    """Convert :class:`~ctfile.ctfile.CTfile` into JSON string.

    :return: ``JSON`` formatted string.
    :rtype: :py:class:`str`.
    """
    return json.dumps(self, sort_keys=sort_keys, indent=indent, cls=CtabAtomBondEncoder)
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L129-L135
convert json to csv
python
def to_json_str(self):
    """Convert data to json string representation.

    Returns:
      json representation as string.
    """
    _json = self.to_json()
    try:
        return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
    except Exception:  # narrowed from a bare except; the error is re-raised
        logging.exception("Could not serialize JSON: %r", _json)
        raise
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L135-L146
convert json to csv
python
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
    """Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.

    Parameters
    ----------
    csv_filepath: str
        Path to the input CSV file.

    json_filepath: str
        Path to the output JSON file. Will be overwritten if exists.

    fieldnames: List[str]
        Names of the fields in the CSV file.

    ignore_first_line: bool
    """
    import csv
    import json

    csvfile = open(csv_filepath, 'r')
    jsonfile = open(json_filepath, 'w')

    reader = csv.DictReader(csvfile, fieldnames)
    rows = []
    if ignore_first_line:
        next(reader)
    for row in reader:
        rows.append(row)

    json.dump(rows, jsonfile)
    jsonfile.close()
    csvfile.close()
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/file_utils.py#L138-L170
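A small end-to-end sketch of the function above; the file names and field names here are hypothetical:

import json

with open('people.csv', 'w') as f:          # made-up input file
    f.write('name,age\nada,36\ngrace,45\n')

csv_to_json('people.csv', 'people.json', fieldnames=['name', 'age'])

with open('people.json') as f:
    print(json.load(f))  # [{'name': 'ada', 'age': '36'}, {'name': 'grace', 'age': '45'}]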
convert json to csv
python
def _convert_html_to_csv(self):
    """*convert html to csv*

    **Return:**
        - None

    .. todo::

        - @review: when complete, clean _convert_html_to_csv method
        - @review: when complete add logging
    """
    self.log.info('starting the ``_convert_html_to_csv`` method')

    import codecs
    allData = ""
    regex1 = re.compile(
        r'.*<PRE><strong> (.*?)</strong>(.*?)</PRE></TABLE>.*', re.I | re.S)
    regex2 = re.compile(r'\|(\w)\|', re.I | re.S)
    for thisFile in self.nedResults:
        pathToReadFile = thisFile
        try:
            self.log.debug("attempting to open the file %s" % (pathToReadFile,))
            readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
            thisData = readFile.read()
            readFile.close()
        except IOError as e:  # Python 3 syntax (was ``except IOError, e:``)
            message = 'could not open the file %s' % (pathToReadFile,)
            self.log.critical(message)
            raise IOError(message)
        except Exception:
            if pathToReadFile is None:
                message = 'we have no file to open'
                self.log.error(message)
                continue
            readFile.close()

        self.log.debug("regex 1 - sub")
        thisData = regex1.sub(r"\g<1>\g<2>", thisData)
        self.log.debug("regex 2 - sub")
        thisData = regex2.sub(r"abs(\g<1>)", thisData)
        self.log.debug("replace text")
        thisData = thisData.replace("|b|", "abs(b)")

        writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w')
        writeFile.write(thisData)
        writeFile.close()

    self.log.info('completed the ``_convert_html_to_csv`` method')
    return None
https://github.com/thespacedoctor/neddy/blob/f32653b7d6a39a2c46c5845f83b3a29056311e5e/neddy/_basesearch.py#L237-L292
convert json to csv
python
def csv_to_obj(cls, file_path=None, text='', columns=None,
               remove_empty_rows=True, key_on=None,
               deliminator=',', eval_cells=True):
    """ This will convert a csv file or csv text into a seaborn table
        and return it

    :param file_path:         str of the path to the file
    :param text:              str of the csv text
    :param columns:           list of str of columns to use
    :param remove_empty_rows: bool if True will remove empty rows
                              which can happen in non-trimmed file
    :param key_on:            list of str of columns to key on
    :param deliminator:       str to use as a deliminator, defaults to ,
    :param eval_cells:        bool if True will try to evaluate numbers
    :return:                  SeabornTable
    """
    lines = cls._get_lines(file_path, text, replace=u'\ufeff')
    for i in range(len(lines)):
        lines[i] = lines[i].replace('\r', '\n')
        lines[i] = lines[i].replace('\\r', '\r').split(',')
    data = cls._merge_quoted_cells(lines, deliminator, remove_empty_rows,
                                   eval_cells)
    row_columns = data[0]
    if len(row_columns) != len(set(row_columns)):  # make unique
        for i, col in enumerate(row_columns):
            count = row_columns[:i].count(col)
            row_columns[i] = '%s_%s' % (col, count) if count else col

    return cls.list_to_obj(data[1:], columns=columns,
                           row_columns=row_columns, key_on=key_on)
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L244-L273
convert json to csv
python
def to_json(self, data):
    """
    Converts the given object to a pretty-formatted JSON string

    :param data: the object to convert to JSON
    :return: A pretty-formatted JSON string
    """
    # Don't forget the empty line at the end of the file
    return (
        json.dumps(
            data,
            sort_keys=True,
            indent=4,
            separators=(",", ": "),
            default=self.json_converter,
        )
        + "\n"
    )
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/report.py#L665-L682
convert json to csv
python
def to_json(value):
    """
    Converts a value to a jsonable type.
    """
    if type(value) in JSON_TYPES:
        return value
    elif hasattr(value, "to_json"):
        return value.to_json()
    elif isinstance(value, list) or isinstance(value, set) or \
            isinstance(value, deque) or isinstance(value, tuple):
        return [to_json(v) for v in value]
    elif isinstance(value, dict):
        return {str(k): to_json(v) for k, v in value.items()}
    else:
        raise TypeError("{0} is not json serializable.".format(type(value)))
https://github.com/halfak/python-jsonable/blob/70a53aedaca84d078228b3564fdd8f60a586d43f/jsonable/functions.py#L6-L20
convert json to csv
python
def conv_json(self, uri_format="sparql_uri", add_ids=False):
    """ converts the class to a json compatible python dictionary

    Args:
        uri_format('sparql_uri','pyuri'): The format that uri values will
                be returned

    Returns:
        dict: a json compatible python dictionary
    """

    def convert_item(ivalue):
        """ converts an individual value to a json value

        Args:
            ivalue: value of the item to convert

        Returns:
            JSON serializable value
        """
        nvalue = ivalue
        if isinstance(ivalue, BaseRdfDataType):
            if ivalue.type == 'uri':
                if ivalue.startswith("pyuri") and uri_format == "pyuri":
                    nvalue = getattr(ivalue, "sparql")
                else:
                    nvalue = getattr(ivalue, uri_format)
            else:
                nvalue = ivalue.to_json
        elif isinstance(ivalue, RdfClassBase):
            if ivalue.subject.type == "uri":
                nvalue = ivalue.conv_json(uri_format, add_ids)
            elif ivalue.subject.type == "bnode":
                nvalue = ivalue.conv_json(uri_format, add_ids)
        elif isinstance(ivalue, list):
            nvalue = []
            for item in ivalue:
                temp = convert_item(item)
                nvalue.append(temp)
        return nvalue

    rtn_val = {key: convert_item(value) for key, value in self.items()}
    if add_ids:
        if self.subject.type == 'uri':
            rtn_val['uri'] = self.subject.sparql_uri
            rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest()
    return rtn_val
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfclass.py#L245-L293
convert json to csv
python
def _get_json(value):
    """Convert the given value to a JSON object."""
    if hasattr(value, 'replace'):
        value = value.replace('\n', ' ')
    try:
        return json.loads(value)
    except json.JSONDecodeError:
        # Escape double quotes.
        if hasattr(value, 'replace'):
            value = value.replace('"', '\\"')
        # try putting the value into a string
        return json.loads('"{}"'.format(value))
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L37-L48
convert json to csv
python
def to_json(self):
    """
    Convert to a json-serializable representation.

    Returns:
        dict: A dict representation that is json-serializable.
    """
    return {
        "xblock_id": six.text_type(self.xblock_id),
        "messages": [message.to_json() for message in self.messages],
        "empty": self.empty
    }
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/validation.py#L111-L122
convert json to csv
python
def to_json(value, **kwargs):
    """Convert instance to JSON"""
    if isinstance(value, HasProperties):
        return value.serialize(**kwargs)
    try:
        return json.loads(json.dumps(value))
    except TypeError:
        raise TypeError(
            "Cannot convert type {} to JSON without calling 'serialize' "
            "on an instance of Instance Property and registering a custom "
            "serializer".format(value.__class__.__name__)
        )
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/instance.py#L161-L172
convert json to csv
python
def json(body, charset='utf-8', **kwargs):
    """Takes JSON formatted data, converting it into native Python objects"""
    return json_converter.loads(text(body, charset=charset))
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/input_format.py#L41-L43
convert json to csv
python
def to_json_str(self):
    """Convert data to json string representation.

    Returns:
        json representation as string.
    """
    # The original called ``dict(vars(self), sort_keys=True)``, which would
    # insert a spurious 'sort_keys' key into the dict; sort_keys belongs to
    # json.dumps.
    adict = dict(vars(self))
    adict['type'] = self.__class__.__name__
    return json.dumps(adict, sort_keys=True)
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/model.py#L62-L70
convert json to csv
python
def _convert_dict_to_json(array):
    """ Converts array to a json string """
    return json.dumps(
        array,
        skipkeys=False,
        allow_nan=False,
        indent=None,
        separators=(",", ":"),
        sort_keys=True,
        default=lambda o: o.__dict__,
    )
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py#L70-L80
convert json to csv
python
def to_json(self, path, root_array=True, mode=WRITE_MODE, compression=None):
    """
    Saves the sequence to a json file. If root_array is True, then the
    sequence will be written to json with an array at the root. If it is
    False, then the sequence will be converted from a sequence of
    (Key, Value) pairs to a dictionary so that the json root is a dictionary.

    :param path: path to write file
    :param root_array: write json root as an array or dictionary
    :param mode: file open mode
    """
    with universal_write_open(path, mode=mode, compression=compression) as output:
        if root_array:
            json.dump(self.to_list(), output)
        else:
            json.dump(self.to_dict(), output)
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/pipeline.py#L1509-L1523
convert json to csv
python
def to_json_(self) -> str:
    """Convert the main dataframe to json

    :return: json data
    :rtype: str

    :example: ``ds.to_json_()``
    """
    try:
        renderer = pytablewriter.JsonTableWriter
        data = self._build_export(renderer)
        return data
    except Exception as e:
        self.err(e, "Can not convert data to json")
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/export.py#L26-L39
convert json to csv
python
def from_json(data):
    """
    Convert JSON into an in-memory file storage.

    Args:
        data (str): valid JSON with path and filenames and
                    the base64 encoding of the file content.

    Returns:
        InMemoryFiles: in memory file storage
    """
    memfiles = InMemoryFiles()
    memfiles.files = json.loads(data)
    return memfiles
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L84-L97
convert json to csv
python
def export_json(self, filename, orient='records'):
    """
    Writes an SFrame to a JSON file.

    Parameters
    ----------
    filename : string
        The location to save the JSON file.

    orient : string, optional. Either "records" or "lines"
        If orient="records" the file is saved as a single JSON array.
        If orient="lines", the file is saved as a JSON value per line.

    Examples
    --------
    The orient parameter describes the expected input format of the JSON
    file.

    If orient="records", the output will be a single JSON Array where
    each array element is a dictionary describing the row.

    >>> g
    Columns:
        a  int
        b  int
    Rows: 3
    Data:
    +---+---+
    | a | b |
    +---+---+
    | 1 | 1 |
    | 2 | 2 |
    | 3 | 3 |
    +---+---+
    >>> g.export_json('output.json', orient='records')
    >>> !cat output.json
    [
    {'a':1,'b':1},
    {'a':2,'b':2},
    {'a':3,'b':3},
    ]

    If orient="lines", each row will be emitted as a JSON dictionary to
    each file line.

    >>> g.export_json('output.json', orient='lines')
    >>> !cat output.json
    {'a':1,'b':1}
    {'a':2,'b':2}
    {'a':3,'b':3}
    """
    if orient == "records":
        self.pack_columns(dtype=dict).export_csv(
            filename, file_header='[', file_footer=']',
            header=False, double_quote=False,
            quote_level=csv.QUOTE_NONE,
            line_prefix=',',
            _no_prefix_on_first_value=True)
    elif orient == "lines":
        self.pack_columns(dtype=dict).export_csv(
            filename, header=False, double_quote=False,
            quote_level=csv.QUOTE_NONE)
    else:
        raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L2919-L2996
convert json to csv
python
def to_json(obj):
    """
    Convert obj to json.  Used mostly to convert the classes in json_span.py
    until we switch to nested dicts (or something better)

    :param obj: the object to serialize to json
    :return:  json string
    """
    try:
        return json.dumps(obj,
                          default=lambda obj: {k.lower(): v
                                               for k, v in obj.__dict__.items()},
                          sort_keys=False, separators=(',', ':')).encode()
    except Exception as e:
        # logging takes %-style placeholders, not print-style extra args
        logger.info("to_json: %s", e)
https://github.com/instana/python-sensor/blob/58aecb90924c48bafcbc4f93bd9b7190980918bc/instana/util.py#L75-L87
convert json to csv
python
def to_json(self, validate=False, pretty_print=True, data_path=None):
    """Convert data to JSON

    Parameters
    ----------
    data_path : string
        If not None, then data is written to a separate file at the
        specified path. Note that the ``url`` attribute of the data must
        be set independently for the data to load correctly.

    Returns
    -------
    string
        Valid Vega JSON.
    """
    # TODO: support writing to separate file
    return super(self.__class__, self).to_json(validate=validate,
                                               pretty_print=pretty_print)
https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/data.py#L462-L479
convert json to csv
python
def to_json(self, *args, **kwargs):
    """
    Generate a schema and convert it directly to serialized JSON.

    :rtype: ``str``
    """
    return json.dumps(self.to_schema(), *args, **kwargs)
https://github.com/wolverdude/GenSON/blob/76552d23cf9202e8e7c262cb018eb3cb3df686b9/genson/schema/builder.py#L76-L82
convert json to csv
python
def to_json(self, version=Version.latest):
    """Tries to convert an object into a JSON representation and return
    the resulting string

    An Object can define how it is serialized by overriding the
    as_version() implementation. A caller may further define how the
    object is serialized by passing in a custom encoder. The default
    encoder will ignore properties of an object that are None at the
    time of serialization.

    :param version: The version to which the object must be serialized to.
        This will default to the latest version supported by the library.
    :type version: str | unicode
    """
    return json.dumps(self.as_version(version))
https://github.com/RusticiSoftware/TinCanPython/blob/424eedaa6d19221efb1108edb915fc332abbb317/tincan/serializable_base.py#L91-L105
convert json to csv
python
def _data_to_json(data):
    """Convert to json if it isn't already a string.

    Args:
        data (str): data to convert to json
    """
    # ``unicode`` existed only in Python 2; on Python 3 a plain str check
    # is sufficient.
    if not isinstance(data, str):
        data = json.dumps(data)
    return data
https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/base.py#L69-L77
convert json to csv
python
def json(cls, res, *args, **kwargs):
    """Parses JSON from a response."""
    # if an encoding is already set then use the provided encoding
    if res.encoding is None:
        res.encoding = cls.determine_json_encoding(res.content[:4])
    return parse_json(res.text, *args, **kwargs)
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/http_session.py#L98-L103
convert json to csv
python
def transcode(input_filename, output_filename=None, date_format=None):
    """
    Convert a JSON or CSV file of input to a JSON stream (.jsons). This
    kind of file can be easily uploaded using `luminoso_api.upload`.
    """
    if output_filename is None:
        # transcode to standard output
        output = sys.stdout
    else:
        if output_filename.endswith('.json'):
            logger.warning("Changing .json to .jsons, because this program "
                           "outputs a JSON stream format that is not "
                           "technically JSON itself.")
            output_filename += 's'
        output = open(output_filename, 'w')
    for entry in open_json_or_csv_somehow(input_filename,
                                          date_format=date_format):
        # Write text, not UTF-8 bytes: the original ``.encode('utf-8')``
        # would crash on a text-mode file under Python 3.
        output.write(json.dumps(entry, ensure_ascii=False))
        output.write('\n')
    output.close()
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L36-L56
convert json to csv
python
def to_json(self, X, y):
    '''
    Writes dataset to a json file.

    :param X: dataset as list of dict.
    :param y: labels.
    '''
    with gzip.open('%s.gz' % self.path, 'wt') if self.gz else open(
            self.path, 'w') as file:
        json.dump(list(zip(y, X)), file)
https://github.com/MuhammedHasan/sklearn_utils/blob/337c3b7a27f4921d12da496f66a2b83ef582b413/sklearn_utils/utils/skutils_io.py#L48-L57
convert json to csv
python
def encode_json(self, serializable):
    """
    Serialize to json a serializable object (Search, Query, Filter, etc).
    """
    return json.dumps(serializable.serialize(), cls=self.encoder)
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/es.py#L1257-L1261
convert json to csv
python
def json2file(data, filename, encoding='utf-8'):
    """write json in canonical json format"""
    with codecs.open(filename, "w", encoding=encoding) as f:
        json.dump(data, f, ensure_ascii=False, indent=4, sort_keys=True)
https://github.com/cnschema/cdata/blob/893e2e1e27b61c8551c8b5f5f9bf05ec61490e23/cdata/core.py#L71-L76
convert json to csv
python
def to_json(self, *args, **kwargs):
    """Convert Entity to JSON."""
    return json.dumps(self.serialize(), *args, **kwargs)
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/entity.py#L120-L122
convert json to csv
python
def csv(self, filepath=None):
    """*Render the data in CSV format*

    **Key Arguments:**
        - ``filepath`` -- path to the file to write the csv content to. Default *None*

    **Return:**
        - ``renderedData`` -- the data rendered in csv format

    **Usage:**

    To render the data set as csv:

    .. code-block:: python

        print(dataSet.csv())

    .. code-block:: text

        owner,pet,address
        daisy,dog,"belfast, uk"
        john,snake,the moon
        susan,crocodile,larne

    and to save the csv rendering to file:

    .. code-block:: python

        dataSet.csv("/path/to/myfile.csv")
    """
    self.log.debug('starting the ``csv`` method')

    renderedData = self._list_of_dictionaries_to_csv("machine")

    if filepath and renderedData != "NO MATCH":
        # RECURSIVELY CREATE MISSING DIRECTORIES
        if not os.path.exists(os.path.dirname(filepath)):
            os.makedirs(os.path.dirname(filepath))
        writeFile = codecs.open(filepath, encoding='utf-8', mode='w')
        writeFile.write(renderedData)
        writeFile.close()

    self.log.debug('completed the ``csv`` method')
    return renderedData
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/renderer/list_of_dictionaries.py#L94-L142
convert json to csv
python
def to_json(self, path_or_buf=None, orient=None, date_format=None,
            double_precision=10, force_ascii=True, date_unit='ms',
            default_handler=None, lines=False, compression='infer',
            index=True):
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : string or file handle, optional
        File path or object. If not specified, the result is returned as
        a string.
    orient : string
        Indication of expected JSON string format.

        * Series

          - default is 'index'
          - allowed values are: {'split','records','index','table'}

        * DataFrame

          - default is 'columns'
          - allowed values are:
            {'split','records','index','columns','values','table'}

        * The format of the JSON string

          - 'split' : dict like {'index' -> [index],
            'columns' -> [columns], 'data' -> [values]}
          - 'records' : list like
            [{column -> value}, ... , {column -> value}]
          - 'index' : dict like {index -> {column -> value}}
          - 'columns' : dict like {column -> {index -> value}}
          - 'values' : just the values array
          - 'table' : dict like {'schema': {schema}, 'data': {data}}
            describing the data, and the data component is
            like ``orient='records'``.

          .. versionchanged:: 0.20.0

    date_format : {None, 'epoch', 'iso'}
        Type of date conversion. 'epoch' = epoch milliseconds,
        'iso' = ISO8601. The default depends on the `orient`. For
        ``orient='table'``, the default is 'iso'. For all other orients,
        the default is 'epoch'.
    double_precision : int, default 10
        The number of decimal places to use when encoding
        floating point values.
    force_ascii : bool, default True
        Force encoded string to be ASCII.
    date_unit : string, default 'ms' (milliseconds)
        The time unit to encode to, governs timestamp and ISO8601
        precision.  One of 's', 'ms', 'us', 'ns' for second, millisecond,
        microsecond, and nanosecond respectively.
    default_handler : callable, default None
        Handler to call if object cannot otherwise be converted to a
        suitable format for JSON. Should receive a single argument which
        is the object to convert and return a serialisable object.
    lines : bool, default False
        If 'orient' is 'records' write out line delimited json format.
        Will throw ValueError if incorrect 'orient' since others are not
        list like.

        .. versionadded:: 0.19.0

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
        A string representing the compression to use in the output file,
        only used when the first argument is a filename. By default, the
        compression is inferred from the filename.

        .. versionadded:: 0.21.0
        .. versionchanged:: 0.24.0
           'infer' option added and set to default
    index : bool, default True
        Whether to include the index values in the JSON string. Not
        including the index (``index=False``) is only supported when
        orient is 'split' or 'table'.

        .. versionadded:: 0.23.0

    See Also
    --------
    read_json

    Examples
    --------

    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])
    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],"index":["row 1","row 2"],"data":[["a","b"],["c","d"]]}'

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'

    Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:

    >>> df.to_json(orient='columns')
    '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'

    Encoding/decoding a Dataframe using ``'values'`` formatted JSON:

    >>> df.to_json(orient='values')
    '[["a","b"],["c","d"]]'

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"}, {"name": "col 1", "type": "string"}, {"name": "col 2", "type": "string"}], "primaryKey": "index", "pandas_version": "0.20.0"}, "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    from pandas.io import json
    if date_format is None and orient == 'table':
        date_format = 'iso'
    elif date_format is None:
        date_format = 'epoch'
    return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
                        date_format=date_format,
                        double_precision=double_precision,
                        force_ascii=force_ascii, date_unit=date_unit,
                        default_handler=default_handler,
                        lines=lines, compression=compression,
                        index=index)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2158-L2302
convert json to csv
python
def _parsecsv(x):
    """Deserialize file-like object containing csv to a Python generator.
    """
    for line in x:
        # decode as utf-8, whitespace-strip and split on delimiter
        yield line.decode('utf-8').strip().split(config.DELIMITER)
https://github.com/gisgroup/statbank-python/blob/3678820d8da35f225d706ea5096c1f08bf0b9c68/statbank/request.py#L62-L67
convert json to csv
python
def json2csv(json_str, show_header=False, separator='\t'):
    """
    Format a json string to csv like.

    :param json_str: json object string
    :param show_header: whether to also return the csv header line
    :param separator: csv column format separator
    :return: if show_header=False: a string like csv formatting;
             if show_header=True: a tuple (header, csv string)
    """
    json_obj = json.loads(json_str)
    cols = [col for col in json_obj.keys()]
    vals = [str(json_obj.get(col)) for col in cols]

    header = None
    if show_header:
        header = separator.join(cols)
    values = separator.join(vals)
    return (header, values) if show_header else values
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/json2csv.py#L6-L23
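For reference, a hedged example of the flat-object case this helper handles (a single JSON object produces a single output row):

header, row = json2csv('{"a": 1, "b": "two"}', show_header=True)
print(header)  # a\tb   (tab-separated by default)
print(row)     # 1\ttwo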
convert json to csv
python
def to_json(self, path: Optional[str] = None, **kwargs) -> str:
    """Convert to JSON representation.

    Parameters
    ----------
    path: Where to write the JSON.

    Returns
    -------
    The JSON representation.
    """
    from .io import save_json
    return save_json(self, path, **kwargs)
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_collection.py#L171-L183
convert json to csv
python
def csv_format(csv_data, c_headers=None, r_headers=None, rows=None, **kwargs):
    """
    Format csv rows parsed to Dict or Array
    """
    result = None
    c_headers = [] if c_headers is None else c_headers
    r_headers = [] if r_headers is None else r_headers
    rows = [] if rows is None else rows

    result_format = kwargs.get('result_format', ARRAY_RAW_FORMAT)

    # DICT FORMAT
    if result_format == DICT_FORMAT:
        result = csv_dict_format(csv_data, c_headers, r_headers)

    # ARRAY_RAW_FORMAT
    elif result_format == ARRAY_RAW_FORMAT:
        result = rows

    # ARRAY_CLEAN_FORMAT
    elif result_format == ARRAY_CLEAN_FORMAT:
        result = csv_array_clean_format(csv_data, c_headers, r_headers)

    else:
        result = None  # DEFAULT

    if result and result_format < DICT_FORMAT:
        result = [result]

    return result
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L385-L415
convert json to csv
python
def to_json(self):
    """
    Returns a json representation
    """
    data = {}
    for k, v in self.__dict__.items():
        if not k.startswith('_'):
            # values not serializable, should be converted to strings
            if isinstance(v, datetime):
                v = utils.datetime_to_string(v)
            elif isinstance(v, date):
                v = utils.date_to_string(v)
            elif isinstance(v, uuid.UUID):
                v = str(v)
            elif isinstance(v, Decimal):
                v = str(v)
            data[k] = v
    return data
https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L102-L119
convert json to csv
python
def csv(self, text=TEXT, sep=',', index=True, float_fmt="%.2g"):
    """ Generate a CSV table from the table data. """
    return self._data.to_csv(sep=sep, index=index, float_format=float_fmt)
https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/report/__init__.py#L191-L193
convert json to csv
python
def sobject_to_json(obj, key_to_lower=False):
    """
    Converts a suds object to a JSON string.

    :param obj: suds object
    :param key_to_lower: If set, changes index key name to lower case.
    :return: json object
    """
    import json

    data = sobject_to_dict(obj, key_to_lower=key_to_lower, json_serialize=True)
    return json.dumps(data)
https://github.com/python-fedex-devs/python-fedex/blob/7ea2ca80c362f5dbbc8d959ab47648c7a4ab24eb/fedex/tools/conversion.py#L84-L93
convert json to csv
python
def to_json(value, pretty=False):
    """
    Serializes the given value to JSON.

    :param value: the value to serialize
    :param pretty:
        whether or not to format the output in a more human-readable way;
        if not specified, defaults to ``False``
    :type pretty: bool

    :rtype: str
    """
    options = {
        'sort_keys': False,
        'cls': BasicJSONEncoder,
    }

    if pretty:
        options['indent'] = 2
        options['separators'] = (',', ': ')

    return json.dumps(value, **options)
https://github.com/jayclassless/basicserial/blob/da779edd955ba1009d14fae4e5926e29ad112b9d/src/basicserial/__init__.py#L80-L100
convert json to csv
python
def convert_json_node(self, json_input):
    """
    Dispatch JSON input according to the outermost type and process it
    to generate the super awesome HTML format.
    We try to adhere to duck typing such that users can just pass all kinds
    of funky objects to json2html that *behave* like dicts and lists and
    other basic JSON types.
    """
    if type(json_input) in text_types:
        if self.escape:
            # cgi.escape was removed in Python 3.8; html.escape is the
            # modern equivalent.
            return cgi.escape(text(json_input))
        else:
            return text(json_input)
    if hasattr(json_input, 'items'):
        return self.convert_object(json_input)
    if hasattr(json_input, '__iter__') and hasattr(json_input, '__getitem__'):
        return self.convert_list(json_input)
    return text(json_input)
https://github.com/softvar/json2html/blob/7070939172f1afd5c11c664e6cfece280cfde7e6/json2html/jsonconv.py#L86-L103
convert json to csv
python
def _convert_json(obj):
    '''
    Converts from the JSON output provided by ovs-vsctl into a usable Python
    object tree. In particular, sets and maps are converted from lists to
    actual sets or maps.

    Args:
        obj: Object that shall be recursively converted.

    Returns:
        Converted version of object.
    '''
    if isinstance(obj, dict):
        return {_convert_json(key): _convert_json(val)
                for (key, val) in six.iteritems(obj)}
    elif isinstance(obj, list) and len(obj) == 2:
        first = obj[0]
        second = obj[1]
        if first == 'set' and isinstance(second, list):
            return [_convert_json(elem) for elem in second]
        elif first == 'map' and isinstance(second, list):
            for elem in second:
                if not isinstance(elem, list) or len(elem) != 2:
                    return obj
            return {elem[0]: _convert_json(elem[1]) for elem in second}
        else:
            return obj
    elif isinstance(obj, list):
        return [_convert_json(elem) for elem in obj]
    else:
        return obj
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L99-L129
convert json to csv
python
def to_json(data, pretty):
    """
    Converts object to JSON formatted string with typeToken adapter

    :param data: A dictionary to convert to JSON string
    :param pretty: A boolean deciding whether or not to pretty format the JSON string
    :return: The JSON string
    """
    if pretty:
        return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(data)
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/utils/json_utils.py#L6-L15
convert json to csv
python
def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
    """Serialize self as JSON

    Args:
        drop_null: bool, default True. Remove 'empty' attributes. See
            to_dict.
        camel: bool, default False. Convert keys to camelCase.
        indent: int, default None. See json built-in.
        sort_keys: bool, default False. See json built-in.

    Return:
        str: object params.
    """
    return json.dumps(self.to_dict(drop_null, camel), indent=indent,
                      sort_keys=sort_keys)
https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L29-L43
convert json to csv
python
def to_json(data, compress=False):
    """Converts the output of `generate(...)` to formatted json.

    Floats are rounded to three decimals and positional vectors are printed
    on one line with some whitespace buffer.
    """
    return json.compress(data) if compress else json.dumps(data)
https://github.com/patrickfuller/imolecule/blob/07e91600c805123935a78782871414754bd3696d/imolecule/notebook.py#L179-L185
convert json to csv
python
def to_json(self, obj, host=None, indent=None):
    """Recursively encode `obj` and convert it to a JSON string.

    :param obj: Object to encode.
    :param host: hostname where this object is being encoded.
    :type host: str"""
    if indent:
        return json.dumps(deep_map(lambda o: self.encode(o, host), obj),
                          indent=indent)
    else:
        return json.dumps(deep_map(lambda o: self.encode(o, host), obj))
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/serial/registry.py#L236-L249
convert json to csv
python
def from_csv(self, input_data):
    """
    Reads csv format input data and converts to json.
    """
    reformatted_data = []
    for (i, row) in enumerate(input_data):
        if i == 0:
            headers = row
        else:
            data_row = {}
            for (j, h) in enumerate(headers):
                data_row.update({h: row[j]})
            reformatted_data.append(data_row)
    return reformatted_data
https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/datahandlers/formatters.py#L90-L103
convert json to csv
python
def file2json(self, jsonfile=None):
    """ convert entire lte file into json like format

    USAGE: 1: kwsdictstr = file2json()
           2: kwsdictstr = file2json(jsonfile='somefile')

    show pretty format with pipeline: | jshon, or | pjson

    if jsonfile is defined, dump to defined file before returning json string

    :param jsonfile: filename to dump json strings
    """
    kwslist = self.detectAllKws()
    kwsdict = {}
    idx = 0
    for kw in sorted(kwslist, key=str.lower):
        idx += 1
        tdict = self.getKwAsDict(kw)
        self.rpn2val(tdict)
        kwsdict.update(tdict)
        if kw not in self.ctrlconf_dict:
            ctrlconf = self.getKwCtrlConf(kw, fmt='dict')
            if ctrlconf is not None:
                self.ctrlconf_dict.update({kw: ctrlconf})
    kwsdict.update(self.prestrdict)
    # all epics control config in self.ctrlconf_dict
    ctrlconfdict = {'_epics': self.ctrlconf_dict}
    kwsdict.update(ctrlconfdict)
    try:
        with open(os.path.expanduser(jsonfile), 'w') as outfile:
            json.dump(kwsdict, outfile)
    except (TypeError, OSError):
        # jsonfile is None or not writable; fall through to return the string
        pass
    return json.dumps(kwsdict)
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L319-L350
convert json to csv
python
def format_to_csv(filename, skiprows=0, delimiter=""):
    """Convert a file to a .csv file"""
    if not delimiter:
        delimiter = "\t"

    input_file = open(filename, "r")
    if skiprows:
        [input_file.readline() for _ in range(skiprows)]

    new_filename = os.path.splitext(filename)[0] + ".csv"
    output_file = open(new_filename, "w")

    header = input_file.readline().split()
    reader = csv.DictReader(input_file, fieldnames=header, delimiter=delimiter)
    writer = csv.DictWriter(output_file, fieldnames=header, delimiter=",")

    # Write header
    writer.writerow(dict((x, x) for x in header))

    # Write rows
    for line in reader:
        if None in line:
            del line[None]
        writer.writerow(line)

    input_file.close()
    output_file.close()
    print("Saved %s." % new_filename)  # print() call; original was Python 2
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L155-L182
convert json to csv
python
def to_json(value, **kwargs):
    """Convert array to JSON list

    nan values are converted to string 'nan', inf values to 'inf'.
    """
    def _recurse_list(val):
        if val and isinstance(val[0], list):
            return [_recurse_list(v) for v in val]
        return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]
    return _recurse_list(value.tolist())
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/math.py#L227-L236
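A minimal check of the nan/inf handling above (numpy assumed to be in scope as np, as in the snippet):

import numpy as np

print(to_json(np.array([1.0, np.nan, np.inf])))
# [1.0, 'nan', 'inf'] -- non-finite values become strings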
convert json to csv
python
def to_json(obj, pretty=False):
    """Converts an object to JSON, using the defaults specified in
    register_json_default.

    :obj: the object to convert to JSON
    :pretty: if True, extra whitespace is added to make the output easier
             to read
    """
    sort_keys = False
    indent = None
    separators = (",", ":")

    if isinstance(pretty, tuple):
        sort_keys, indent, separators = pretty
    elif pretty is True:
        sort_keys = True
        indent = 2
        separators = (", ", ": ")

    return json.dumps(obj, sort_keys=sort_keys, indent=indent,
                      separators=separators, default=json_default)
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/__init__.py#L78-L96
convert json to csv
python
def format_json(json_object, indent):
    """ Pretty-format json data """
    indent_str = "\n" + " " * indent
    json_str = json.dumps(json_object, indent=2, default=serialize_json_var)
    return indent_str.join(json_str.split("\n"))
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/output.py#L55-L59
convert json to csv
python
def get_csv_from_json(d):
    """
    Get CSV values when mixed into json data. Pull out the CSV data and
    put it into a dictionary.

    :param dict d: JSON with CSV values
    :return dict: CSV values
        (i.e. { CSVFilename1: { Column1: [Values], Column2: [Values] },
                CSVFilename2: ... })
    """
    logger_jsons.info("enter get_csv_from_json")
    csv_data = OrderedDict()

    if "paleoData" in d:
        csv_data = _get_csv_from_section(d, "paleoData", csv_data)

    if "chronData" in d:
        csv_data = _get_csv_from_section(d, "chronData", csv_data)

    logger_jsons.info("exit get_csv_from_json")
    return csv_data
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/jsons.py#L231-L247
convert json to csv
python
def arg_to_json(arg):
    """
    Perform necessary JSON conversion on the arg.
    """
    conversion = json_conversions.get(type(arg))
    if conversion:
        return conversion(arg)
    for type_ in subclass_conversions:
        if isinstance(arg, type_):
            return json_conversions[type_](arg)
    return json_conversions[str](arg)
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L748-L758
convert json to csv
python
def csv_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    """Convert a CSV object to a numpy array.

    Args:
        string_like (str): CSV string.
        dtype (dtype, optional): Data type of the resulting array. If None,
            the dtypes will be determined by the contents of each column,
            individually. This argument can only be used to 'upcast' the
            array. For downcasting, use the .astype(t) method.

    Returns:
        (np.array): numpy array
    """
    stream = StringIO(string_like)
    return np.genfromtxt(stream, dtype=dtype, delimiter=',')
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L89-L101
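A quick usage sketch for the decoder above (numpy assumed in scope as np):

arr = csv_to_numpy("1,2,3\n4,5,6\n")
print(arr.shape)  # (2, 3) -- genfromtxt infers a homogeneous numeric array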
convert json to csv
python
def _as_json(self, **kwargs) -> str:
    """ Convert a JsonObj into straight json text

    :param kwargs: json.dumps arguments
    :return: JSON formatted str
    """
    return json.dumps(self, default=self._default, **kwargs)
https://github.com/hsolbrig/jsonasobj/blob/13ac0838bb85d47bb6b8a667a4c2c69dba93e87d/src/jsonasobj/jsonobj.py#L56-L62
convert json to csv
python
def rec2csv(r, filename):
    """Export a recarray *r* to a CSV file *filename*"""
    names = r.dtype.names

    def translate(x):
        # The original tested ``str(x).lower == "none"``, which compares the
        # bound method itself and is always False; lower() must be called.
        if x is None or str(x).lower() == "none":
            x = ""
        return str(x)

    with open(filename, "w") as csv:
        csv.write(",".join([str(x) for x in names]) + "\n")
        for data in r:
            csv.write(",".join([translate(x) for x in data]) + "\n")
    return filename
https://github.com/orbeckst/RecSQL/blob/6acbf821022361719391697c9c2f0822f9f8022a/recsql/export.py#L24-L36
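An illustrative call with a small record array (numpy assumed; 'out.csv' is a made-up output path):

import numpy as np

r = np.rec.fromrecords([(1, 2.5), (2, 3.5)], names='a,b')
rec2csv(r, 'out.csv')  # writes: a,b / 1,2.5 / 2,3.5
# None values and the string 'none' are written as empty cells by translate().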
convert json to csv
python
def json_decode(s: str) -> Any:
    """
    Decodes an object from JSON using our custom decoder.
    """
    try:
        return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s)
    except json.JSONDecodeError:
        log.warning("Failed to decode JSON (returning None): {!r}", s)
        return None
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L636-L644
convert json to csv
python
def iterjson(text):
    """Decode JSON stream."""
    decoder = json.JSONDecoder()
    while text:
        obj, ndx = decoder.raw_decode(text)
        if not isinstance(obj, dict):
            raise ValueError()
        text = text[ndx:].lstrip('\r\n')
        yield obj
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L32-L42
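A quick demonstration of the stream decoder above (stdlib json assumed imported):

for obj in iterjson('{"a": 1}\n{"b": 2}\n'):
    print(obj)
# {'a': 1}
# {'b': 2}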
convert json to csv
python
def convert_from_json(self, md, entityname, rsaprivatekey):
    # type: (EncryptionMetadata, dict, str,
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey)
    #       -> None
    """Read metadata json into objects
    :param EncryptionMetadata self: this
    :param dict md: metadata dictionary
    :param str entityname: entity name
    :param rsaprivatekey: RSA private key
    :type rsaprivatekey:
        cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey
    """
    # populate from encryption data
    ed = json.loads(md[EncryptionMetadata._METADATA_KEY_NAME])
    try:
        self.blobxfer_extensions = EncryptionBlobxferExtensions(
            pre_encrypted_content_md5=ed[
                EncryptionMetadata._JSON_KEY_BLOBXFER_EXTENSIONS][
                    EncryptionMetadata._JSON_KEY_PREENCRYPTED_MD5],
        )
    except KeyError:
        pass
    self.content_encryption_iv = base64.b64decode(
        ed[EncryptionMetadata._JSON_KEY_CONTENT_IV])
    self.encryption_agent = EncryptionAgent(
        encryption_algorithm=ed[
            EncryptionMetadata._JSON_KEY_ENCRYPTION_AGENT][
                EncryptionMetadata._JSON_KEY_ENCRYPTION_ALGORITHM],
        protocol=ed[
            EncryptionMetadata._JSON_KEY_ENCRYPTION_AGENT][
                EncryptionMetadata._JSON_KEY_PROTOCOL],
    )
    if (self.encryption_agent.encryption_algorithm !=
            EncryptionMetadata._ENCRYPTION_ALGORITHM):
        raise RuntimeError('{}: unknown block cipher: {}'.format(
            entityname, self.encryption_agent.encryption_algorithm))
    if (self.encryption_agent.protocol !=
            EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION):
        raise RuntimeError('{}: unknown encryption protocol: {}'.format(
            entityname, self.encryption_agent.protocol))
    self.encryption_authentication = EncryptionAuthentication(
        algorithm=ed[
            EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH][
                EncryptionMetadata._JSON_KEY_ALGORITHM],
        message_authentication_code=ed[
            EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH][
                EncryptionMetadata._JSON_KEY_MAC],
    )
    if (self.encryption_authentication.algorithm !=
            EncryptionMetadata._AUTH_ALGORITHM):
        raise RuntimeError(
            '{}: unknown integrity/auth method: {}'.format(
                entityname, self.encryption_authentication.algorithm))
    self.encryption_mode = ed[
        EncryptionMetadata._JSON_KEY_ENCRYPTION_MODE]
    if self.encryption_mode != EncryptionMetadata._ENCRYPTION_MODE:
        raise RuntimeError(
            '{}: unknown encryption mode: {}'.format(
                entityname, self.encryption_mode))
    try:
        _eak = ed[EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][
            EncryptionMetadata._JSON_KEY_ENCRYPTED_AUTHKEY]
    except KeyError:
        _eak = None
    self.wrapped_content_key = EncryptionWrappedContentKey(
        algorithm=ed[
            EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][
                EncryptionMetadata._JSON_KEY_ALGORITHM],
        encrypted_authentication_key=_eak,
        encrypted_key=ed[
            EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][
                EncryptionMetadata._JSON_KEY_ENCRYPTED_KEY],
        key_id=ed[
            EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][
                EncryptionMetadata._JSON_KEY_KEYID],
    )
    if (self.wrapped_content_key.algorithm !=
            EncryptionMetadata._ENCRYPTED_KEY_SCHEME):
        raise RuntimeError('{}: unknown key encryption scheme: {}'.format(
            entityname, self.wrapped_content_key.algorithm))
    # if RSA key is a public key, stop here as keys cannot be decrypted
    if rsaprivatekey is None:
        return
    # decrypt symmetric key
    self._symkey = blobxfer.operations.crypto.\
        rsa_decrypt_base64_encoded_key(
            rsaprivatekey, self.wrapped_content_key.encrypted_key)
    # decrypt signing key, if it exists
    if blobxfer.util.is_not_empty(
            self.wrapped_content_key.encrypted_authentication_key):
        self._signkey = blobxfer.operations.crypto.\
            rsa_decrypt_base64_encoded_key(
                rsaprivatekey,
                self.wrapped_content_key.encrypted_authentication_key)
    else:
        self._signkey = None
    # populate from encryption data authentication
    try:
        eda = json.loads(md[EncryptionMetadata._METADATA_KEY_AUTH_NAME])
    except KeyError:
        pass
    else:
        self.encryption_metadata_authentication = \
            EncryptionMetadataAuthentication(
                algorithm=eda[
                    EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][
                        EncryptionMetadata._JSON_KEY_ALGORITHM],
                encoding=eda[
                    EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][
                        EncryptionMetadata._JSON_KEY_AUTH_ENCODING],
                message_authentication_code=eda[
                    EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][
                        EncryptionMetadata._JSON_KEY_MAC],
            )
        if (self.encryption_metadata_authentication.algorithm !=
                EncryptionMetadata._AUTH_ALGORITHM):
            raise RuntimeError(
                '{}: unknown integrity/auth method: {}'.format(
                    entityname,
                    self.encryption_metadata_authentication.algorithm))
        # verify hmac
        authhmac = base64.b64decode(
            self.encryption_metadata_authentication.
            message_authentication_code)
        bmeta = md[EncryptionMetadata._METADATA_KEY_NAME].encode(
            self.encryption_metadata_authentication.encoding)
        hmacsha256 = hmac.new(self._signkey, digestmod=hashlib.sha256)
        hmacsha256.update(bmeta)
        if hmacsha256.digest() != authhmac:
            raise RuntimeError(
                '{}: encryption metadata authentication failed'.format(
                    entityname))
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/crypto.py#L190-L321
convert json to csv
python
def _object_to_json(obj):
    """Convert objects that cannot be natively serialized into JSON
    into their string representation

    For datetime based objects convert them into their ISO formatted
    string as specified by :meth:`datetime.datetime.isoformat`.

    :param obj: object to convert into a JSON via getting its string
        representation.
    :type obj: object

    :return: String value representing the given object
        ready to be encoded into a JSON.
    :rtype: str
    """
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return repr(obj)
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L338-L355
convert json to csv
python
def convert(self, json="", table_attributes='border="1"', clubbing=True,
            encode=False, escape=True):
    """
    Convert JSON to HTML Table format
    """
    # table attributes such as class, id, data-attr-*, etc.
    # eg: table_attributes = 'class = "table table-bordered sortable"'
    self.table_init_markup = "<table %s>" % table_attributes
    self.clubbing = clubbing
    self.escape = escape
    json_input = None
    if not json:
        json_input = {}
    elif type(json) in text_types:
        try:
            json_input = json_parser.loads(json, object_pairs_hook=OrderedDict)
        except ValueError as e:
            # The string passed here is actually not a json string - analyze
            # whether to pass on the error or use the string as-is as a
            # text node.
            if u"Expecting property name" in text(e):
                # If this specific json loads error is raised, the user
                # probably actually wanted to pass json, but made a mistake.
                raise e
            json_input = json
    else:
        json_input = json
    converted = self.convert_json_node(json_input)
    if encode:
        return converted.encode('ascii', 'xmlcharrefreplace')
    return converted
https://github.com/softvar/json2html/blob/7070939172f1afd5c11c664e6cfece280cfde7e6/json2html/jsonconv.py#L37-L64
convert json to csv
python
def _convert_list_to_json(array):
    """ Converts array to a json string """
    return json.dumps(array, skipkeys=False, allow_nan=False, indent=None,
                      separators=(",", ":"))
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py#L65-L67
convert json to csv
python
def is_json_compat(value):
    """
    Check that the value is either a JSON decodable string or a dict
    that can be encoded into a JSON.

    Raises ValueError when validation fails.
    """
    try:
        value = json.loads(value)
    except ValueError as e:
        raise ValueError('JSON decoding error: ' + str(e))
    except TypeError:
        # Check that the value can be serialized back into json.
        try:
            json.dumps(value)
        except TypeError as e:
            raise ValueError(
                'must be a JSON serializable object: ' + str(e))

    if not isinstance(value, dict):
        raise ValueError(
            'must be specified as a JSON serializable dict or a '
            'JSON deserializable string')

    return True
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L37-L63
convert json to csv
python
def to_json(self, value):
    """Subclasses should override this method for JSON encoding."""
    if not self.is_valid(value):
        raise ex.SerializeException('Invalid value: {}'.format(value))
    return value
https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/core.py#L50-L54
convert json to csv
python
def get_json(filename):
    """ Return a json value of the exif

    Get a filename and return a JSON object

    Arguments:
        filename {string} -- your filename

    Returns:
        [JSON] -- Return a JSON object
    """
    check_if_this_file_exist(filename)

    # Process this function
    filename = os.path.abspath(filename)
    s = command_line(['exiftool', '-G', '-j', '-sort', filename])
    if s:
        # convert bytes to string
        s = s.decode('utf-8').rstrip('\r\n')
        return json.loads(s)
    else:
        return s
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L100-L121
convert json to csv
python
def open_json_or_csv_somehow(filename, date_format=None):
    """
    Deduce the format of a file, within reason.

    - If the filename ends with .csv or .txt, it's csv.
    - If the filename ends with .jsons, it's a JSON stream (conveniently
      the format we want to output).
    - If the filename ends with .json, it could be a legitimate JSON file,
      or it could be a JSON stream, following a nonstandard convention
      that many people including us are guilty of. In that case:
      - If the first line is a complete JSON document, and there is more
        in the file besides the first line, then it is a JSON stream.
      - Otherwise, it is probably really JSON.
    - If the filename does not end with .json, .jsons, or .csv, we have
      to guess whether it's still CSV or tab-separated values or something
      like that. If it's JSON, the first character would almost certainly
      have to be a bracket or a brace. If it isn't, assume it's CSV or
      similar.
    """
    fileformat = None
    if filename.endswith('.csv'):
        fileformat = 'csv'
    elif filename.endswith('.jsons'):
        fileformat = 'jsons'
    else:
        with open(filename) as opened:
            line = opened.readline()
            if line[0] not in '{[' and not filename.endswith('.json'):
                fileformat = 'csv'
            else:
                if (line.count('{') == line.count('}')
                        and line.count('[') == line.count(']')):
                    # This line contains a complete JSON document. This
                    # probably means it's in linewise JSON ('.jsons')
                    # format, unless the whole file is on one line.
                    char = ' '
                    while char.isspace():
                        # read one character at a time until EOF or
                        # a non-space character
                        char = opened.read(1)
                        if char == '':
                            fileformat = 'json'
                            break
                    if fileformat is None:
                        fileformat = 'jsons'
                else:
                    fileformat = 'json'

    if fileformat == 'json':
        stream = json.load(open(filename), encoding='utf-8')
    elif fileformat == 'csv':
        stream = open_csv_somehow(filename)
    else:
        stream = stream_json_lines(filename)

    return _normalize_data(stream, date_format=date_format)
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L73-L125
convert json to csv
python
def to_json(self):
    """
    Convert to a JSON string.
    """
    obj = {
        "vertices": [
            {
                "id": vertex.id,
                "annotation": vertex.annotation,
            }
            for vertex in self.vertices
        ],
        "edges": [
            {
                "id": edge.id,
                "annotation": edge.annotation,
                "head": edge.head,
                "tail": edge.tail,
            }
            for edge in self._edges
        ],
    }
    # Ensure that we always return unicode output on Python 2.
    return six.text_type(json.dumps(obj, ensure_ascii=False))
https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/annotated_graph.py#L199-L223
convert json to csv
python
def to_json(self):
    """Convert to json serializable dictionary.

    Returns:
        dict: dictionary of descriptor
    """
    d, ps = self._to_json()
    if len(ps) == 0:
        return {"name": d}
    else:
        return {"name": d, "args": ps}
https://github.com/mordred-descriptor/mordred/blob/2848b088fd7b6735590242b5e22573babc724f10/mordred/_base/descriptor.py#L94-L105
convert json to csv
python
def to_json(self, content, pretty_print=False):
    """ Convert a string to a JSON object

    ``content`` String content to convert into JSON

    ``pretty_print`` If defined, will output JSON in pretty print format
    """
    if PY3:
        if isinstance(content, bytes):
            content = content.decode(encoding='utf-8')
    if pretty_print:
        json_ = self._json_pretty_print(content)
    else:
        json_ = json.loads(content)
    logger.info('To JSON using : content=%s ' % (content))
    logger.info('To JSON using : pretty_print=%s ' % (pretty_print))
    return json_
https://github.com/bulkan/robotframework-requests/blob/11baa3277f1cb728712e26d996200703c15254a8/src/RequestsLibrary/RequestsKeywords.py#L460-L477
convert json to csv
python
def from_json(cls, json_data):
    """Tries to convert a JSON representation to an object of the same
    type as self

    A class can provide a _fromJSON implementation in order to do specific
    type checking or other custom implementation details. This method will
    throw a ValueError for invalid JSON, a TypeError for improperly
    constructed, but valid JSON, and any custom errors that can be
    propagated from class constructors.

    :param json_data: The JSON string to convert
    :type json_data: str | unicode

    :raises: TypeError, ValueError, LanguageMapInitError
    """
    data = json.loads(json_data)
    result = cls(data)
    if hasattr(result, "_from_json"):
        result._from_json()
    return result
https://github.com/RusticiSoftware/TinCanPython/blob/424eedaa6d19221efb1108edb915fc332abbb317/tincan/serializable_base.py#L70-L89
convert json to csv
python
def string2json(self, string):
    """Convert json into its string representation.

    Used for writing outputs to markdown."""
    kwargs = {
        'cls': BytesEncoder,  # use the IPython bytes encoder
        'indent': 1,
        'sort_keys': True,
        'separators': (',', ': '),
    }
    return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L461-L470
convert json to csv
python
def to_json(df, x, y):
    """Format output for json response."""
    values = []
    for i, row in df.iterrows():
        values.append({
            "x": row[x],
            "y": row[y]
        })
    if df.empty:
        return {"result": [{"x": 0, "y": 0}], "date": False}
    return {"result": values, "date": False}
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/barchart.py#L58-L70
convert json to csv
python
def format_to_json(data):
    """Converts `data` into json

    If stdout is a tty it performs a pretty print.
    """
    if sys.stdout.isatty():
        return json.dumps(data, indent=4, separators=(',', ': '))
    else:
        return json.dumps(data)
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L141-L148
convert json to csv
python
def _csv_to_nodes_dict(nodes_csv):
    """Convert CSV to a list of dicts formatted for os_cloud_config

    Given a CSV file in the format below, convert it into the
    structure expected by os_cloud_config JSON files.

    pm_type, pm_addr, pm_user, pm_password, mac
    """
    data = []

    for row in csv.reader(nodes_csv):
        node = {
            "pm_user": row[2],
            "pm_addr": row[1],
            "pm_password": row[3],
            "pm_type": row[0],
            "mac": [row[4]]
        }
        data.append(node)

    return data
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/baremetal.py#L35-L58
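An illustrative call with inline rows; csv.reader accepts any iterable of strings, and the address and MAC below are made up:

import csv

rows = ["pxe_ssh,192.0.2.1,root,secret,00:11:22:33:44:55"]
print(_csv_to_nodes_dict(rows))
# [{'pm_user': 'root', 'pm_addr': '192.0.2.1', 'pm_password': 'secret',
#   'pm_type': 'pxe_ssh', 'mac': ['00:11:22:33:44:55']}]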
convert json to csv
python
def object_to_json(obj):
    """Convert object that cannot be natively serialized by python to JSON
    representation."""
    if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
        return obj.isoformat()
    return str(obj)
https://github.com/keeprocking/pygelf/blob/c851a277f86a239a6632683fd56dfb2300b353eb/pygelf/gelf.py#L70-L74
convert json to csv
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
        comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
        ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
        negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
        maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
        columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None,
        enforceSchema=None, emptyValue=None, locale=None, lineSep=None):
    r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`.

    This function will go through the input once to determine the input schema if
    ``inferSchema`` is enabled. To avoid going through the entire data once, disable
    ``inferSchema`` option or specify the schema explicitly using ``schema``.

    .. note:: Evolving.

    :param path: string, or list of strings, for input path(s).
    :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
                   or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
    :param sep: sets a single character as a separator for each field and value.
                If None is set, it uses the default value, ``,``.
    :param encoding: decodes the CSV files by the given encoding type. If None is set,
                     it uses the default value, ``UTF-8``.
    :param quote: sets a single character used for escaping quoted values where the
                  separator can be part of the value. If None is set, it uses the default
                  value, ``"``. If you would like to turn off quotations, you need to set an
                  empty string.
    :param escape: sets a single character used for escaping quotes inside an already
                   quoted value. If None is set, it uses the default value, ``\``.
    :param comment: sets a single character used for skipping lines beginning with this
                    character. By default (None), it is disabled.
    :param header: uses the first line as names of columns. If None is set, it uses the
                   default value, ``false``.
    :param inferSchema: infers the input schema automatically from data. It requires one
                        extra pass over the data. If None is set, it uses the default
                        value, ``false``.
    :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be
                          forcibly applied to datasource files, and headers in CSV files will
                          be ignored. If the option is set to ``false``, the schema will be
                          validated against all headers in CSV files or the first header in
                          RDD if the ``header`` option is set to ``true``. Field names in the
                          schema and column names in CSV headers are checked by their
                          positions taking into account ``spark.sql.caseSensitive``. If None
                          is set, ``true`` is used by default. Though the default value is
                          ``true``, it is recommended to disable the ``enforceSchema`` option
                          to avoid incorrect results.
    :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
                                    values being read should be skipped. If None is set, it
                                    uses the default value, ``false``.
    :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces
                                     from values being read should be skipped. If None is
                                     set, it uses the default value, ``false``.
    :param nullValue: sets the string representation of a null value. If None is set, it uses
                      the default value, empty string. Since 2.0.1, this ``nullValue`` param
                      applies to all supported types including the string type.
    :param nanValue: sets the string representation of a non-number value. If None is set, it
                     uses the default value, ``NaN``.
    :param positiveInf: sets the string representation of a positive infinity value. If None
                        is set, it uses the default value, ``Inf``.
    :param negativeInf: sets the string representation of a negative infinity value. If None
                        is set, it uses the default value, ``Inf``.
    :param dateFormat: sets the string that indicates a date format. Custom date formats
                       follow the formats at ``java.time.format.DateTimeFormatter``. This
                       applies to date type. If None is set, it uses the default value,
                       ``yyyy-MM-dd``.
    :param timestampFormat: sets the string that indicates a timestamp format.
                            Custom date formats follow the formats at
                            ``java.time.format.DateTimeFormatter``. This applies to
                            timestamp type. If None is set, it uses the default value,
                            ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
    :param maxColumns: defines a hard limit of how many columns a record can have. If None is
                       set, it uses the default value, ``20480``.
    :param maxCharsPerColumn: defines the maximum number of characters allowed for any given
                              value being read. If None is set, it uses the default value,
                              ``-1`` meaning unlimited length.
    :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
                                        If specified, it is ignored.
    :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                 set, it uses the default value, ``PERMISSIVE``.

            * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
              into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
              fields to ``null``. To keep corrupt records, an user can set a string type \
              field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \
              schema does not have the field, it drops corrupt records during parsing. \
              A record with less/more tokens than schema is not a corrupted record to CSV. \
              When it meets a record having fewer tokens than the length of the schema, \
              sets ``null`` to extra fields. When the record has more tokens than the \
              length of the schema, it drops extra tokens.
            * ``DROPMALFORMED`` : ignores the whole corrupted records.
            * ``FAILFAST`` : throws an exception when it meets corrupted records.

    :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                      created by ``PERMISSIVE`` mode. This overrides
                                      ``spark.sql.columnNameOfCorruptRecord``. If None is
                                      set, it uses the value specified in
                                      ``spark.sql.columnNameOfCorruptRecord``.
    :param multiLine: parse one record, which may span multiple lines. If None is
                      set, it uses the default value, ``false``.
    :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape
                                      for the quote character. If None is set, the default
                                      value is escape character when escape and quote
                                      characters are different, ``\0`` otherwise.
    :param emptyValue: sets the string representation of an empty value. If None is set, it
                       uses the default value, empty string.
    :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
                   it uses the default value, ``en-US``. For instance, ``locale`` is used
                   while parsing dates and timestamps.
    :param lineSep: defines the line separator that should be used for parsing. If None is
                    set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
                    Maximum length is 1 character.

    >>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
    >>> csv_sdf.isStreaming
    True
    >>> csv_sdf.schema == sdf_schema
    True
    """
    self._set_opts(
        schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape,
        comment=comment, header=header, inferSchema=inferSchema,
        ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
        ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
        nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
        dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
        maxCharsPerColumn=maxCharsPerColumn,
        maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
        columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine,
        charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema,
        emptyValue=emptyValue, locale=locale, lineSep=lineSep)
    if isinstance(path, basestring):
        return self._df(self._jreader.csv(path))
    else:
        raise TypeError("path can be only a single string")
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L575-L705
convert json to csv
python
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
         allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
         mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
         multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None,
         dropFieldIfAllNull=None, encoding=None):
    """
    Loads a JSON file stream and returns the results as a :class:`DataFrame`.

    `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
    For JSON (one record per file), set the ``multiLine`` parameter to ``true``.

    If the ``schema`` parameter is not specified, this function goes
    through the input once to determine the input schema.

    .. note:: Evolving.

    :param path: string represents path to the JSON dataset,
                 or RDD of Strings storing JSON objects.
    :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
                   or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
    :param primitivesAsString: infers all primitive values as a string type. If None is set,
                               it uses the default value, ``false``.
    :param prefersDecimal: infers all floating-point values as a decimal type. If the values
                           do not fit in decimal, then it infers them as doubles. If None is
                           set, it uses the default value, ``false``.
    :param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
                          it uses the default value, ``false``.
    :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
                                    it uses the default value, ``false``.
    :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
                              set, it uses the default value, ``true``.
    :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
                                    set, it uses the default value, ``false``.
    :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
                                               using backslash quoting mechanism. If None is
                                               set, it uses the default value, ``false``.
    :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                 set, it uses the default value, ``PERMISSIVE``.

            * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
              into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
              fields to ``null``. To keep corrupt records, an user can set a string type \
              field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \
              schema does not have the field, it drops corrupt records during parsing. \
              When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
              field in an output schema.
            * ``DROPMALFORMED`` : ignores the whole corrupted records.
            * ``FAILFAST`` : throws an exception when it meets corrupted records.

    :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                      created by ``PERMISSIVE`` mode. This overrides
                                      ``spark.sql.columnNameOfCorruptRecord``. If None is
                                      set, it uses the value specified in
                                      ``spark.sql.columnNameOfCorruptRecord``.
    :param dateFormat: sets the string that indicates a date format. Custom date formats
                       follow the formats at ``java.time.format.DateTimeFormatter``. This
                       applies to date type. If None is set, it uses the default value,
                       ``yyyy-MM-dd``.
    :param timestampFormat: sets the string that indicates a timestamp format.
                            Custom date formats follow the formats at
                            ``java.time.format.DateTimeFormatter``. This applies to
                            timestamp type. If None is set, it uses the default value,
                            ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
    :param multiLine: parse one record, which may span multiple lines, per file. If None is
                      set, it uses the default value, ``false``.
    :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
                                      characters (ASCII characters with value less than 32,
                                      including tab and line feed characters) or not.
    :param lineSep: defines the line separator that should be used for parsing. If None is
                    set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
    :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
                   it uses the default value, ``en-US``. For instance, ``locale`` is used
                   while parsing dates and timestamps.
    :param dropFieldIfAllNull: whether to ignore column of all null values or empty
                               array/struct during schema inference. If None is set, it
                               uses the default value, ``false``.
    :param encoding: allows to forcibly set one of standard basic or extended encoding for
                     the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
                     the encoding of input JSON will be detected automatically
                     when the multiLine option is set to ``true``.

    >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
    >>> json_sdf.isStreaming
    True
    >>> json_sdf.schema == sdf_schema
    True
    """
    self._set_opts(
        schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
        allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
        allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
        allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
        mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord,
        dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine,
        allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale,
        dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding)
    if isinstance(path, basestring):
        return self._df(self._jreader.json(path))
    else:
        raise TypeError("path can be only a single string")
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L403-L503
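Neither streaming reader converts formats on its own, but the two compose into a JSON-to-CSV stream. A minimal sketch, assuming an active Spark session; the paths, sdf_schema, and checkpoint directory are hypothetical placeholders, not part of the snippets above:

# Sketch: stream newline-delimited JSON in, write CSV out.
# All paths and sdf_schema are hypothetical placeholders.
json_stream = spark.readStream.json("/data/in_json", schema=sdf_schema)
query = (json_stream.writeStream
         .format("csv")
         .option("path", "/data/out_csv")            # output directory (placeholder)
         .option("checkpointLocation", "/data/chk")  # file sinks require a checkpoint
         .start())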
convert json to csv
python
def convert(self, json_input):
    """
    Converts JSON to HTML Table format.

    Parameters
    ----------
    json_input : dict
        JSON object to convert into HTML.

    Returns
    -------
    str
        String of converted HTML.
    """
    html_output = self._table_opening_tag
    if self._build_top_to_bottom:
        html_output += self._markup_header_row(json_input.keys())
        html_output += "<tr>"
        for value in json_input.values():
            if isinstance(value, list):
                # check if all keys in the list are identical
                # and group all values under a common column
                # heading if so, if not default to normal markup
                html_output += self._maybe_club(value)
            else:
                html_output += self._markup_table_cell(value)
        html_output += "</tr>"
    else:
        for key, value in iter(json_input.items()):
            html_output += "<tr><th>{:s}</th>".format(self._markup(key))
            if isinstance(value, list):
                html_output += self._maybe_club(value)
            else:
                html_output += self._markup_table_cell(value)
            html_output += "</tr>"
    html_output += "</table>"
    return html_output
https://github.com/latture/json2table/blob/8bd1363f54ee4fd608ffb7677761526184a9da83/json2table/json2table.py#L73-L109
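For reference, a minimal usage sketch; it assumes json2table's module-level convert() wrapper and its build_direction keyword, neither of which appears in the excerpt above:

# Usage sketch; the module-level convert() and build_direction kwarg are assumptions.
from json2table import convert
html = convert({"name": "Ada", "languages": ["python", "sql"]},
               build_direction="LEFT_TO_RIGHT")
print(html)  # e.g. "<table><tr><th>name</th><td>Ada</td></tr>...</table>"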
convert json to csv
python
def to_json(self, skip_nulls=True):
    """Convert object to a json string"""
    return json.dumps(self.to_dict(skip_nulls=skip_nulls))
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/objects.py#L243-L245
convert json to csv
python
def to_json(obj, indent=4, sort_keys=True, **kwargs):
    """
    :param obj: object to convert to dictionary and then output to json
    :param indent: indent json by number of spaces
    :param sort_keys: sort json output by key if true
    :param kwargs: arguments to pass to to_dict
    :return: json string
    """
    obj_dict = to_dict(obj, **kwargs)
    return json.dumps(obj_dict, indent=indent, sort_keys=sort_keys)
https://github.com/genomoncology/related/blob/be47c0081e60fc60afcde3a25f00ebcad5d18510/src/related/functions.py#L200-L209
convert json to csv
python
def as_json(data, **kwargs):
    """Writes data as json.

    :param dict data: data to convert to json
    :param kwargs kwargs: kwargs for json dumps
    :return: json string
    :rtype: str
    """
    if 'sort_keys' not in kwargs:
        kwargs['sort_keys'] = False
    if 'ensure_ascii' not in kwargs:
        kwargs['ensure_ascii'] = False
    data = json.dumps(data, **kwargs)
    return data
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/rest/utils.py#L39-L55
convert json to csv
python
def save_json(filename: str, config: Union[List, Dict]):
    """Save JSON data to a file.

    Returns True on success.
    """
    try:
        data = json.dumps(config, sort_keys=True, indent=4)
        with open(filename, 'w', encoding='utf-8') as fdesc:
            fdesc.write(data)
            return True
    except TypeError as error:
        _LOGGER.exception('Failed to serialize to JSON: %s', filename)
        raise PytradfriError(error)
    except OSError as error:
        _LOGGER.exception('Saving JSON file failed: %s', filename)
        raise PytradfriError(error)
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L32-L49
convert json to csv
python
def from_json(cls, data):
    """Decode a JSON string and inflate a node instance."""
    # Decode JSON string
    assert isinstance(data, str)
    data = json.loads(data)
    assert isinstance(data, dict)
    return cls.from_dict(data)
https://github.com/ProjetPP/PPP-datamodel-Python/blob/0c7958fb4df75468fd3137240a5065925c239776/ppp_datamodel/utils/serializableattributesholder.py#L30-L36
convert json to csv
python
def serialize_to_json(result, unpicklable=False):
    """Serializes output as JSON and writes it to console output wrapped with
    special prefix and suffix

    :param result: Result to return
    :param unpicklable: If True adds JSON can be deserialized as real object.
                        When False will be deserialized as dictionary
    """
    json = jsonpickle.encode(result, unpicklable=unpicklable)
    result_for_output = str(json)
    return result_for_output
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/networking_utils.py#L30-L40
convert json to csv
python
def dump_json(token_dict, dump_path):
    """write json data to file
    """
    if sys.version > '3':
        with open(dump_path, 'w', encoding='utf-8') as output_file:
            json.dump(token_dict, output_file, indent=4)
    else:
        with open(dump_path, 'w') as output_file:
            json.dump(token_dict, output_file, indent=4)
https://github.com/fbngrm/babelpy/blob/ff305abecddd66aed40c32f0010485cf192e5f17/babelpy/dump.py#L7-L15
convert json to csv
python
def to_json(self):
    """ Converts segment to a JSON serializable format

    Returns:
        :obj:`dict`
    """
    points = [point.to_json() for point in self.points]
    return {
        'points': points,
        'transportationModes': self.transportation_modes,
        'locationFrom': self.location_from.to_json() if self.location_from != None else None,
        'locationTo': self.location_to.to_json() if self.location_to != None else None
    }
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L290-L302
convert json to csv
python
def to_json(df, x, y, timeseries=False):
    """Format output for json response."""
    values = {k: [] for k in y}
    for i, row in df.iterrows():
        for yy in y:
            values[yy].append({
                "x": row[x],
                "y": row[yy]
            })
    return {"result": [values[k] for k in y], "date": timeseries}
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/line_chart.py#L58-L67
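A small usage sketch for the formatter above, fed a throwaway pandas DataFrame; the column names are illustrative only:

# Usage sketch for to_json() above; column names are illustrative.
import pandas as pd
df = pd.DataFrame({"date": ["2019-01-01", "2019-01-02"],
                   "sales": [10, 12], "visits": [100, 90]})
payload = to_json(df, x="date", y=["sales", "visits"], timeseries=True)
# payload["result"] holds one list of {"x": ..., "y": ...} points per series in y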
convert json to csv
python
def csv_dict_format(csv_data, c_headers=None, r_headers=None):
    """
    Format csv rows parsed to Dict.
    """
    # format dict if has row_headers
    if r_headers:
        result = {}
        for k_index in range(0, len(csv_data)):
            if r_headers[k_index]:
                result[r_headers[k_index]] = collections.OrderedDict(
                    zip(c_headers, csv_data[k_index]))
    # format list if hasn't row_headers -- square csv
    else:
        result = []
        for k_index in range(0, len(csv_data)):
            result.append(
                collections.OrderedDict(zip(c_headers, csv_data[k_index])))
        result = [result]
    return result
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L338-L358
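A usage sketch for the headerless ("square") branch; note the function wraps that result in an extra list, so the JSON below is doubly nested. It assumes csv_dict_format from the snippet above is in scope (with its collections import):

# Usage sketch, assuming csv_dict_format from the snippet above is defined.
import json
rows = [["1", "2"], ["3", "4"]]
dicts = csv_dict_format(rows, c_headers=["a", "b"])
print(json.dumps(dicts))  # [[{"a": "1", "b": "2"}, {"a": "3", "b": "4"}]]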
convert json to csv
python
async def json(self, *, encoding: str = None,
               loads: JSONDecoder = DEFAULT_JSON_DECODER,
               content_type: Optional[str] = 'application/json') -> Any:
    """Read and decodes JSON response."""
    return await self._aws_json(
        encoding=encoding, loads=loads, content_type=content_type)
https://github.com/howie6879/ruia/blob/2dc5262fc9c3e902a8faa7d5fa2f046f9d9ee1fa/ruia/response.py#L123-L130
convert json to csv
python
def save_json(self, fname='servers.json'):
    ''' Write out to a JSON file.
    '''
    rows = sorted(self.keys())
    with open(fname, 'wt') as fp:
        json.dump([self[k] for k in rows], fp, indent=1)
https://github.com/coinkite/connectrum/blob/99948f92cc5c3ecb1a8a70146294014e608e50fc/connectrum/svr_info.py#L238-L244
convert json to csv
python
def _convert_to_json(self, response):
    """Converts response to JSON.

    If the response cannot be converted to JSON then `None` is returned.

    Args:
        response - An object of type `requests.models.Response`
    Returns:
        Response in JSON format if the response can be converted to JSON.
        `None` otherwise.
    """
    try:
        return response.json()
    except ValueError:
        logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(
            response.request.url,
            response.text,
        ))
        return None
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L382-L397
convert json to csv
python
def serialize_to_json(data, **kwargs):
    """
    A wrapper for simplejson.dumps with defaults as:

    cls=LazyJSONEncoder

    All arguments can be added via kwargs
    """
    kwargs['cls'] = kwargs.get('cls', LazyJSONEncoder)
    return json.dumps(data, **kwargs)
https://github.com/yceruto/django-ajax/blob/d5af47c0e65571d4729f48781c0b41886b926221/django_ajax/encoder.py#L58-L68
convert json to csv
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
        comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
        ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
        negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
        maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
        columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None,
        samplingRatio=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None):
    r"""Loads a CSV file and returns the result as a :class:`DataFrame`.

    This function will go through the input once to determine the input schema if
    ``inferSchema`` is enabled. To avoid going through the entire data once, disable
    ``inferSchema`` option or specify the schema explicitly using ``schema``.

    :param path: string, or list of strings, for input path(s),
                 or RDD of Strings storing CSV rows.
    :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
                   or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
    :param sep: sets a single character as a separator for each field and value.
                If None is set, it uses the default value, ``,``.
    :param encoding: decodes the CSV files by the given encoding type. If None is set,
                     it uses the default value, ``UTF-8``.
    :param quote: sets a single character used for escaping quoted values where the
                  separator can be part of the value. If None is set, it uses the default
                  value, ``"``. If you would like to turn off quotations, you need to set an
                  empty string.
    :param escape: sets a single character used for escaping quotes inside an already
                   quoted value. If None is set, it uses the default value, ``\``.
    :param comment: sets a single character used for skipping lines beginning with this
                    character. By default (None), it is disabled.
    :param header: uses the first line as names of columns. If None is set, it uses the
                   default value, ``false``.
    :param inferSchema: infers the input schema automatically from data. It requires one
                        extra pass over the data. If None is set, it uses the default
                        value, ``false``.
    :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be
                          forcibly applied to datasource files, and headers in CSV files will
                          be ignored. If the option is set to ``false``, the schema will be
                          validated against all headers in CSV files or the first header in
                          RDD if the ``header`` option is set to ``true``. Field names in the
                          schema and column names in CSV headers are checked by their
                          positions taking into account ``spark.sql.caseSensitive``. If None
                          is set, ``true`` is used by default. Though the default value is
                          ``true``, it is recommended to disable the ``enforceSchema`` option
                          to avoid incorrect results.
    :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
                                    values being read should be skipped. If None is set, it
                                    uses the default value, ``false``.
    :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces
                                     from values being read should be skipped. If None is
                                     set, it uses the default value, ``false``.
    :param nullValue: sets the string representation of a null value. If None is set, it uses
                      the default value, empty string. Since 2.0.1, this ``nullValue`` param
                      applies to all supported types including the string type.
    :param nanValue: sets the string representation of a non-number value. If None is set, it
                     uses the default value, ``NaN``.
    :param positiveInf: sets the string representation of a positive infinity value. If None
                        is set, it uses the default value, ``Inf``.
    :param negativeInf: sets the string representation of a negative infinity value. If None
                        is set, it uses the default value, ``Inf``.
    :param dateFormat: sets the string that indicates a date format. Custom date formats
                       follow the formats at ``java.time.format.DateTimeFormatter``. This
                       applies to date type. If None is set, it uses the default value,
                       ``yyyy-MM-dd``.
    :param timestampFormat: sets the string that indicates a timestamp format.
                            Custom date formats follow the formats at
                            ``java.time.format.DateTimeFormatter``. This applies to
                            timestamp type. If None is set, it uses the default value,
                            ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
    :param maxColumns: defines a hard limit of how many columns a record can have. If None is
                       set, it uses the default value, ``20480``.
    :param maxCharsPerColumn: defines the maximum number of characters allowed for any given
                              value being read. If None is set, it uses the default value,
                              ``-1`` meaning unlimited length.
    :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
                                        If specified, it is ignored.
    :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                 set, it uses the default value, ``PERMISSIVE``.

            * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
              into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
              fields to ``null``. To keep corrupt records, an user can set a string type \
              field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \
              schema does not have the field, it drops corrupt records during parsing. \
              A record with less/more tokens than schema is not a corrupted record to CSV. \
              When it meets a record having fewer tokens than the length of the schema, \
              sets ``null`` to extra fields. When the record has more tokens than the \
              length of the schema, it drops extra tokens.
            * ``DROPMALFORMED`` : ignores the whole corrupted records.
            * ``FAILFAST`` : throws an exception when it meets corrupted records.

    :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                      created by ``PERMISSIVE`` mode. This overrides
                                      ``spark.sql.columnNameOfCorruptRecord``. If None is
                                      set, it uses the value specified in
                                      ``spark.sql.columnNameOfCorruptRecord``.
    :param multiLine: parse records, which may span multiple lines. If None is
                      set, it uses the default value, ``false``.
    :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape
                                      for the quote character. If None is set, the default
                                      value is escape character when escape and quote
                                      characters are different, ``\0`` otherwise.
    :param samplingRatio: defines fraction of rows used for schema inferring.
                          If None is set, it uses the default value, ``1.0``.
    :param emptyValue: sets the string representation of an empty value. If None is set, it
                       uses the default value, empty string.
    :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
                   it uses the default value, ``en-US``. For instance, ``locale`` is used
                   while parsing dates and timestamps.
    :param lineSep: defines the line separator that should be used for parsing. If None is
                    set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
                    Maximum length is 1 character.

    >>> df = spark.read.csv('python/test_support/sql/ages.csv')
    >>> df.dtypes
    [('_c0', 'string'), ('_c1', 'string')]
    >>> rdd = sc.textFile('python/test_support/sql/ages.csv')
    >>> df2 = spark.read.csv(rdd)
    >>> df2.dtypes
    [('_c0', 'string'), ('_c1', 'string')]
    """
    self._set_opts(
        schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape,
        comment=comment, header=header, inferSchema=inferSchema,
        ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
        ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
        nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
        dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
        maxCharsPerColumn=maxCharsPerColumn,
        maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
        columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine,
        charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio,
        enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep)
    if isinstance(path, basestring):
        path = [path]
    if type(path) == list:
        return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
    elif isinstance(path, RDD):
        def func(iterator):
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = path.mapPartitions(func)
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
        # see SPARK-22112
        # There aren't any jvm api for creating a dataframe from rdd storing csv.
        # We can do it through creating a jvm dataset firstly and using the jvm api
        # for creating a dataframe from dataset storing csv.
        jdataset = self._spark._ssql_ctx.createDataset(
            jrdd.rdd(),
            self._spark._jvm.Encoders.STRING())
        return self._df(self._jreader.csv(jdataset))
    else:
        raise TypeError("path can be only string, list or RDD")
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L352-L506
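Since DataFrameReader.json and DataFrameWriter.csv mirror each other, a batch JSON-to-CSV conversion in PySpark reduces to a read/write pair; a minimal sketch with placeholder paths:

# Sketch: batch JSON -> CSV in PySpark; paths are placeholders.
df = spark.read.json("in.jsonl")  # newline-delimited JSON records
df.write.mode("overwrite").option("header", True).csv("out_csv_dir")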
convert json to csv
python
def export_to_json(env):
    """export_to_json

    This function takes in a dictionary object and stores it within the
    config.JSON file.
    """
    json_fl = env['scripts'] + "/config.JSON"
    with open(json_fl, 'w') as fh:
        fh.write(json.dumps(env, sort_keys=True, indent=4,
                            separators=(',', ': ')))
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5-sdk-dist/scripts/configure.py#L122-L131
convert json to csv
python
def convert_kv_to_dict(data):
    """
    convert text values in format:
    key1=value1
    key2=value2
    to dict {'key1':'value1', 'key2':'value2'}

    :param data: string containing lines with these values
    :return: dict
    """
    output = {}
    for line in data.split("\n"):
        stripped = line.strip()
        if stripped:
            key, value = stripped.split("=", 1)
            output[key] = value
    return output
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/__init__.py#L35-L51
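A usage sketch chaining the parser above into a one-row CSV via the stdlib; it assumes convert_kv_to_dict from the snippet is in scope:

# Usage sketch: key=value text -> dict -> one-row CSV on stdout.
import csv
import sys
d = convert_kv_to_dict("key1=value1\nkey2=value2")
writer = csv.DictWriter(sys.stdout, fieldnames=sorted(d))
writer.writeheader()   # key1,key2
writer.writerow(d)     # value1,value2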
convert json to csv
python
def _tsv2json(in_tsv, out_json, index_column, additional_metadata=None,
              drop_columns=None, enforce_case=True):
    """
    Convert metadata from TSV format to JSON format.

    Parameters
    ----------
    in_tsv: str
        Path to the metadata in TSV format.
    out_json: str
        Path where the metadata should be saved in JSON format after
        conversion. If this is None, then a dictionary is returned instead.
    index_column: str
        Name of the column in the TSV to be used as an index (top-level key in
        the JSON).
    additional_metadata: dict
        Any additional metadata that should be applied to all entries in the
        JSON.
    drop_columns: list
        List of columns from the input TSV to be dropped from the JSON.
    enforce_case: bool
        Indicates whether BIDS case conventions should be followed. Currently,
        this means that index fields (column names in the associated data TSV)
        use snake case and other fields use camel case.

    Returns
    -------
    str
        Path to the metadata saved in JSON format.
    """
    import pandas as pd

    # Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ...
    # back-using-regular-expressions-and-python-m9j
    re_to_camel = r'(.*?)_([a-zA-Z0-9])'
    re_to_snake = r'(^.+?|.*?)((?<![_A-Z])[A-Z]|(?<![_0-9])[0-9]+)'

    def snake(match):
        return '{}_{}'.format(match.group(1).lower(), match.group(2).lower())

    def camel(match):
        return '{}{}'.format(match.group(1), match.group(2).upper())

    # from fmriprep
    def less_breakable(a_string):
        """ hardens the string to different envs (i.e. case insensitive, no
        whitespace, '#' """
        return ''.join(a_string.split()).strip('#')

    drop_columns = drop_columns or []
    additional_metadata = additional_metadata or {}
    tsv_data = pd.read_csv(in_tsv, '\t')
    for k, v in additional_metadata.items():
        tsv_data[k] = v
    for col in drop_columns:
        tsv_data.drop(labels=col, axis='columns', inplace=True)
    tsv_data.set_index(index_column, drop=True, inplace=True)
    if enforce_case:
        tsv_data.index = [re.sub(re_to_snake, snake,
                                 less_breakable(i), 0).lower()
                          for i in tsv_data.index]
        tsv_data.columns = [re.sub(re_to_camel, camel,
                                   less_breakable(i).title(), 0)
                            for i in tsv_data.columns]
    json_data = tsv_data.to_json(orient='index')
    json_data = json.JSONDecoder(
        object_pairs_hook=OrderedDict).decode(json_data)

    if out_json is None:
        return json_data
    with open(out_json, 'w') as f:
        json.dump(json_data, f, indent=4)
    return out_json
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/utils.py#L793-L864
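A call sketch for the converter above; the filenames, index column, and metadata values are hypothetical BIDS-style placeholders:

# Call sketch; all arguments are hypothetical placeholders.
out = _tsv2json("participants.tsv", "participants.json",
                index_column="participant_id",
                additional_metadata={"SampleOrigin": "brain"},
                drop_columns=["notes"])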
convert json to csv
python
def export_json(self, filename):
    """
    Export graph in JSON form to the given file.
    """
    json_graph = self.to_json()
    with open(filename, 'wb') as f:
        f.write(json_graph.encode('utf-8'))
https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/annotated_graph.py#L253-L260
convert json to csv
python
def to_json(self):
    """ Serializes the event to JSON.

    :returns: a string
    """
    event_as_dict = copy.deepcopy(self.event_body)
    if self.timestamp:
        if "keen" in event_as_dict:
            event_as_dict["keen"]["timestamp"] = self.timestamp.isoformat()
        else:
            event_as_dict["keen"] = {"timestamp": self.timestamp.isoformat()}
    return json.dumps(event_as_dict)
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/client.py#L33-L44