query: string (length 9-60)
language: string (1 class)
code: string (length 105-25.7k)
url: string (length 91-217)
write csv
python
def writeCSV(data, headers, csvFile):
    """Write data with column headers to a CSV."""
    with open(csvFile, "wb") as f:
        writer = csv.writer(f, delimiter=",")
        writer.writerow(headers)
        writer.writerows(data)
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/csv_helper.py#L147-L152
write csv
python
def writeCsv(self, path):
    """
    Writes data one or many streams into one CSV file.

    :param path: absolute or relative file path
    """
    self._createConfluence()
    with open(path, "w") as outputFile:
        writer = csv.writer(outputFile)
        headers = self.getStreamIds()
        fieldNames = ["timestamp"] + headers
        flags = ["T"] + ["" for h in headers]
        types = ["datetime"] + self._confluence.getDataTypes()
        writer.writerow(fieldNames)
        writer.writerow(types)
        writer.writerow(flags)
        for row in self._confluence:
            writer.writerow(row)
    print "Wrote CSV data to %s." % path
https://github.com/htm-community/menorah/blob/1991b01eda3f6361b22ed165b4a688ae3fb2deaf/menorah/menorah.py#L124-L141
write csv
python
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False): """ Write csv that are aligned with the column headers. >>> header = ["x_value", "y_value"] >>> contents = [(1, 100), (2, 200)] >>> write_csv(header, contents) x_value, y_value 1, 100 2, 200 """ from jcvi.formats.base import must_open formatted = load_csv(header, contents, sep=sep, thousands=thousands, align=align) if comment: formatted[0] = '#' + formatted[0][1:] formatted = "\n".join(formatted) fw = must_open(filename, "w") print(formatted, file=fw) if tee and filename != "stdout": print(formatted)
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/table.py#L111-L133
write csv
python
def csv_writer(parser, keep, extract, args):
    """Writes the data in CSV format."""
    # The output
    output = sys.stdout if args.output == "-" else open(args.output, "w")

    try:
        # Getting the samples
        samples = np.array(parser.get_samples(), dtype=str)
        k = _get_sample_select(samples=samples, keep=keep)

        # Writing the CSV header
        print("sample_id", "variant_id", "chromosome", "position",
              "reference", "coded", "dosage", "hard_call", sep=",",
              file=output)

        # The data generator
        generator = _get_generator(parser=parser, extract=extract, keep=k,
                                   check_maf=args.maf)

        # The number of markers extracted
        nb_extracted = 0

        for data in generator:
            # Keeping only the required genotypes
            genotypes = data.genotypes

            # The hard call mapping
            hard_call_mapping = {
                0: "{ref}/{ref}".format(ref=data.reference),
                1: "{ref}/{alt}".format(ref=data.reference, alt=data.coded),
                2: "{alt}/{alt}".format(alt=data.coded),
            }

            for sample, geno in zip(samples[k], genotypes):
                # Is the genotype missing
                is_missing = np.isnan(geno)

                # Hard coding (NaN values are empty string)
                hard_coded = None
                if is_missing:
                    geno = ""
                    hard_coded = ""
                else:
                    hard_coded = hard_call_mapping[int(round(geno, 0))]

                print(sample, data.variant.name, data.variant.chrom,
                      data.variant.pos, data.reference, data.coded, geno,
                      hard_coded, sep=",", file=output)

            nb_extracted += 1

        if nb_extracted == 0:
            logger.warning("No markers matched the extract list")

    finally:
        output.close()
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/extract/__main__.py#L187-L241
write csv
python
def csv(file, *args, **kwargs):
    '''
    Write CSV file.

    Parameters
    ----------
    file : Path
    *args
        csv.DictWriter args (except the f arg)
    **kwargs
        csv.DictWriter args

    Examples
    --------
    with write.csv(file) as writer:
        writer.writerow((1,2,3))
    '''
    with file.open('w', newline='') as f:
        yield DictWriter(f, *args, **kwargs)
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/write.py#L29-L47
write csv
python
def write_to_csv(self, filename):
    '''
    Exports to simple csv

    :param str filename: Path to file for export
    '''
    fid = open(filename, 'wt')
    # Create header list
    header_info = ['Longitude', 'Latitude', 'Depth', 'Observed Count',
                   'Smoothed Rate', 'b-value']
    writer = csv.DictWriter(fid, fieldnames=header_info)
    headers = dict((name0, name0) for name0 in header_info)
    # Write to file
    writer.writerow(headers)
    for row in self.data:
        # institute crude compression by omitting points with no seismicity
        # and taking advantage of the %g format
        if row[4] == 0:
            continue
        row_dict = {'Longitude': '%g' % row[0],
                    'Latitude': '%g' % row[1],
                    'Depth': '%g' % row[2],
                    'Observed Count': '%d' % row[3],
                    'Smoothed Rate': '%.6g' % row[4],
                    'b-value': '%g' % self.bval}
        writer.writerow(row_dict)
    fid.close()
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/smoothing/smoothed_seismicity.py#L491-L518
write csv
python
def csv(self, fh):
    """Writes the results data to `fh` in CSV format and returns `fh`.

    :param fh: file to write data to
    :type fh: file object
    :rtype: file object

    """
    self._matches.to_csv(fh, encoding='utf-8', float_format='%d',
                         index=False)
    return fh
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/results.py#L268-L278
write csv
python
def csv(self, filepath=None):
    """*Render the data in CSV format*

    **Key Arguments:**
        - ``filepath`` -- path to the file to write the csv content to. Default *None*

    **Return:**
        - ``renderedData`` -- the data rendered in csv format

    **Usage:**

        To render the data set as csv:

        .. code-block:: python

            print dataSet.csv()

        .. code-block:: text

            owner,pet,address
            daisy,dog,"belfast, uk"
            john,snake,the moon
            susan,crocodile,larne

        and to save the csv rendering to file:

        .. code-block:: python

            dataSet.csv("/path/to/myfile.csv")
    """
    self.log.debug('starting the ``csv`` method')

    renderedData = self._list_of_dictionaries_to_csv("machine")
    if filepath and renderedData != "NO MATCH":
        # RECURSIVELY CREATE MISSING DIRECTORIES
        if not os.path.exists(os.path.dirname(filepath)):
            os.makedirs(os.path.dirname(filepath))
        writeFile = codecs.open(filepath, encoding='utf-8', mode='w')
        writeFile.write(renderedData)
        writeFile.close()

    self.log.debug('completed the ``csv`` method')
    return renderedData
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/renderer/list_of_dictionaries.py#L94-L142
write csv
python
def csv_writer(molecules, options, prefix):
    """
    Write a csv file.
    """
    # output file
    outdir = os.getcwd()
    filename = prefix + '.csv'
    outfile = os.path.join(outdir, filename)

    # initiate csv writer object
    f = open(outfile, 'w')
    csv_writer = csv.writer(f)

    # write csv header
    mol = molecules[0]
    write_csv_header(mol, csv_writer)

    # write csv lines
    for mol in molecules:
        write_csv_line(mol, csv_writer, options)

    # close file
    f.close()
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L90-L113
write csv
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. 
If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) else: raise TypeError("path can be only a single string")
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L575-L705
write csv
python
def write_csv(data, file_name, encoding='utf-8'):
    '''
    Writes out to csv format.

    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file.
    '''
    name_extension = len(data) > 1
    root, ext = os.path.splitext(file_name)
    for i, sheet in enumerate(data):
        fname = file_name if not name_extension else root+"_"+str(i)+ext
        with open(fname, 'wb') as date_file:
            csv_file = csv.writer(date_file, encoding=encoding)
            for line in sheet:
                csv_file.writerow(line)
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L363-L379
write csv
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, samplingRatio=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. :param path: string, or list of strings, for input path(s), or RDD of Strings storing CSV rows. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. 
:param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse records, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise. :param samplingRatio: defines fraction of rows used for schema inferring. If None is set, it uses the default value, ``1.0``. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] >>> rdd = sc.textFile('python/test_support/sql/ages.csv') >>> df2 = spark.read.csv(rdd) >>> df2.dtypes [('_c0', 'string'), ('_c1', 'string')] """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) # see SPARK-22112 # There aren't any jvm api for creating a dataframe from rdd storing csv. # We can do it through creating a jvm dataset firstly and using the jvm api # for creating a dataframe from dataset storing csv. jdataset = self._spark._ssql_ctx.createDataset( jrdd.rdd(), self._spark._jvm.Encoders.STRING()) return self._df(self._jreader.csv(jdataset)) else: raise TypeError("path can be only string, list or RDD")
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L352-L506
write csv
python
def writeCSV(self, fname = None, splitByTag = None, onlyTheseTags = None, numAuthors = True, genderCounts = True, longNames = False, firstTags = None, csvDelimiter = ',', csvQuote = '"', listDelimiter = '|'): """Writes all the `Records` from the collection into a csv file with each row a record and each column a tag. # Parameters _fname_ : `optional [str]` > Default `None`, the name of the file to write to, if `None` it uses the collections name suffixed by .csv. _splitByTag_ : `optional [str]` > Default `None`, if a tag is given the output will be divided into different files according to the value of the tag, with only the records associated with that tag. For example if `'authorsFull'` is given then each file will only have the lines for `Records` that author is named in. > The file names are the values of the tag followed by a dash then the normale name for the file as given by _fname_, e.g. for the year 2016 the file could be called `'2016-fname.csv'`. _onlyTheseTags_ : `optional [iterable]` > Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given. > If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc). _numAuthors_ : `optional [bool]` > Default `True`, if `True` adds the number of authors as the column `'numAuthors'`. _longNames_ : `optional [bool]` > Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used. _firstTags_ : `optional [iterable]` > Default `None`, if `None` the iterable `['UT', 'PT', 'TI', 'AF', 'CR']` is used. The tags given by the iterable are the first ones in the csv in the order given. > **Note** if tags are in _firstTags_ but not in _onlyTheseTags_, _onlyTheseTags_ will override _firstTags_ _csvDelimiter_ : `optional [str]` > Default `','`, the delimiter used for the cells of the csv file. _csvQuote_ : `optional [str]` > Default `'"'`, the quote character used for the csv. _listDelimiter_ : `optional [str]` > Default `'|'`, the delimiter used between values of the same cell if the tag for that record has multiple outputs. 
""" if firstTags is None: firstTags = ['id', 'title', 'authorsFull', 'citations', 'keywords', 'DOI'] for i in range(len(firstTags)): if firstTags[i] in fullToTagDict: firstTags[i] = fullToTagDict[firstTags[i]] if onlyTheseTags: for i in range(len(onlyTheseTags)): if onlyTheseTags[i] in fullToTagDict: onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]] retrievedFields = [t for t in firstTags if t in onlyTheseTags] + [t for t in onlyTheseTags if t not in firstTags] else: retrievedFields = firstTags for R in self: tagsLst = [t for t in R.keys() if t not in retrievedFields] retrievedFields += tagsLst if longNames: try: retrievedFields = [tagToFullDict[t] for t in retrievedFields] except KeyError: raise KeyError("One of the tags could not be converted to a long name.") if fname: baseFileName = fname else: baseFileName = "{}.csv".format(self.name[:200]) if numAuthors: csvWriterFields = retrievedFields + ["num-Authors"] else: csvWriterFields = retrievedFields if genderCounts: csvWriterFields += ['num-Male', 'num-Female', 'num-Unknown'] if splitByTag is None: f = open(baseFileName, mode = 'w', encoding = 'utf-8', newline = '') csvWriter = csv.DictWriter(f, csvWriterFields, delimiter = csvDelimiter, quotechar = csvQuote, quoting=csv.QUOTE_ALL) csvWriter.writeheader() else: filesDict = {} for R in self: if splitByTag: try: splitVal = R[splitByTag] except KeyError: continue else: if not isinstance(splitVal, list): splitVal = [str(splitVal)] recDict = {} for t in retrievedFields: value = R.get(t) if isinstance(value, str): recDict[t] = value elif hasattr(value, '__iter__'): recDict[t] = listDelimiter.join([str(v) for v in value]) elif value is None: recDict[t] = '' else: recDict[t] = str(value) if numAuthors: recDict["num-Authors"] = len(R.get('authorsShort', [])) if genderCounts: recDict['num-Male'], recDict['num-Female'], recDict['num-Unknown'] = R.authGenders(_countsTuple = True) if splitByTag: for sTag in splitVal: if sTag in filesDict: filesDict[sTag][1].writerow(recDict) else: fname = "{}-{}".format(sTag[:200], baseFileName) f = open(fname, mode = 'w', encoding = 'utf-8', newline = '') csvWriter = csv.DictWriter(f, csvWriterFields, delimiter = csvDelimiter, quotechar = csvQuote, quoting=csv.QUOTE_ALL) csvWriter.writeheader() csvWriter.writerow(recDict) filesDict[sTag] = (f, csvWriter) else: csvWriter.writerow(recDict) if splitByTag: for f, c in filesDict.values(): f.close() else: f.close()
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L247-L371
write csv
python
def csv(self, filename=None, **format_params):
    """Generates results in comma-separated form.  Write to ``filename``
    if given. Any other parameter will be passed on to ``csv.writer``.

    :param filename: if given, the CSV will be written to filename.

    Any additional keyword arguments will be passsed through to
    ``csv.writer``.
    """
    if not self.pretty:
        return None  # no results
    if filename:
        outfile = open(filename, 'w')
    else:
        outfile = StringIO()
    writer = UnicodeWriter(outfile, **format_params)
    writer.writerow(self.field_names)
    for row in self:
        writer.writerow(row)
    if filename:
        outfile.close()
        return CsvResultDescriptor(filename)
    else:
        return outfile.getvalue()
https://github.com/versae/ipython-cypher/blob/1e88bd8227743e70b78af42e0e713ae8803485e1/src/cypher/run.py#L424-L447
write csv
python
def _write_csv(self, datasets, filename):
    """
    Write CSV
    :param datasets: Datasets
    :param filename: File Name
    """
    with open('/'.join([self.output, filename]), mode='w', encoding=self.encoding) as write_file:
        writer = csv.writer(write_file, delimiter=',')
        for i, row in enumerate(datasets):
            if i == 0:
                # header
                writer.writerow(list(row.keys()))
            writer.writerow(list(row.values()))
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/mlbam.py#L132-L144
write csv
python
def write_csv(filename, data, delimiter=CSV_DELIMITER):
    """ Write image data to CSV file

    :param filename: name of CSV file to write data to
    :type filename: str
    :param data: image data to write to CSV file
    :type data: numpy array
    :param delimiter: delimiter used in CSV file. Default is ``;``
    :type delimiter: str
    """
    with open(filename, 'w') as file:
        csv_writer = csv.writer(file, delimiter=delimiter)
        for line in data:
            csv_writer.writerow(line)
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/io_utils.py#L258-L271
write csv
python
def write_csv(dest, data, sep=',', fmt='%.6E', header=None, comment=None): """ :param dest: None, file, filename or io.BytesIO instance :param data: array to save :param sep: separator to use (default comma) :param fmt: formatting string (default '%12.8E') :param header: optional list with the names of the columns to display :param comment: optional first line starting with a # character """ close = True if dest is None: # write on a temporary file fd, dest = tempfile.mkstemp(suffix='.csv') os.close(fd) if hasattr(dest, 'write'): # file-like object in append mode # it must be closed by client code close = False elif not hasattr(dest, 'getvalue'): # not a BytesIO, assume dest is a filename dest = open(dest, 'wb') try: # see if data is a composite numpy array data.dtype.fields except AttributeError: # not a composite array autoheader = [] else: autoheader = build_header(data.dtype) if comment: dest.write(encode('# %s\n' % comment)) someheader = header or autoheader if header != 'no-header' and someheader: dest.write(encode(sep.join(htranslator.write(someheader)) + u'\n')) if autoheader: all_fields = [col.split(':', 1)[0].split('~') for col in autoheader] for record in data: row = [] for fields in all_fields: val = extract_from(record, fields) if fields[0] in ('lon', 'lat', 'depth'): row.append('%.5f' % val) else: row.append(scientificformat(val, fmt)) dest.write(encode(sep.join(row) + u'\n')) else: for row in data: dest.write(encode(sep.join(scientificformat(col, fmt) for col in row) + u'\n')) if hasattr(dest, 'getvalue'): return dest.getvalue()[:-1] # a newline is strangely added elif close: dest.close() return dest.name
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/writers.py#L195-L253
write csv
python
def write(self, fname=None, filtered=False, header=True, append=False): r""" Write (processed) data to a specified comma-separated values (CSV) file. :param fname: Name of the comma-separated values file to be written. If None the file from which the data originated is overwritten :type fname: FileName_ :param filtered: Filtering type :type filtered: :ref:`CsvFiltered` :param header: If a list, column headers to use in the file. If boolean, flag that indicates whether the input column headers should be written (True) or not (False) :type header: string, list of strings or boolean :param append: Flag that indicates whether data is added to an existing file (or a new file is created if it does not exist) (True), or whether data overwrites the file contents (if the file exists) or creates a new file if the file does not exists (False) :type append: boolean .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. pcsv.csv_file.CsvFile.write :raises: * OSError (File *[fname]* could not be created: *[reason]*) * RuntimeError (Argument \`append\` is not valid) * RuntimeError (Argument \`filtered\` is not valid) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`header\` is not valid) * RuntimeError (Argument \`no_empty\` is not valid) * ValueError (There is no data to save to file) .. [[[end]]] """ # pylint: disable=R0913 write_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file") fname = self._fname if fname is None else fname data = self.data(filtered=filtered) write_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0))) if header: header = [header] if isinstance(header, str) else header cfilter = self._gen_col_index(filtered=filtered) filtered_header = ( [self._header[item] for item in cfilter] if self._has_header else cfilter ) file_header = filtered_header if isinstance(header, bool) else header # Convert None's to '' data = [["''" if item is None else item for item in row] for row in data] _write_int(fname, [file_header] + data if header else data, append=append)
https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L788-L849
write csv
python
def write(file_name, rows, header=None, *args, **kwargs):
    ''' Write rows data to a CSV file (with or without header) '''
    warnings.warn("chirptext.io.CSV is deprecated and will be removed in near future.", DeprecationWarning)
    write_csv(file_name, rows, fieldnames=header, *args, **kwargs)
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L216-L219
write csv
python
def _write_csv(self, df, csv_path, chunksize=10**5):
    """
    Parameters
    ----------
    df : pandas.DataFrame

    csv_path : str

    chunksize : int
        Number of rows to write at a time. Helps to limit memory
        consumption while writing a CSV.
    """
    logger.info("Saving DataFrame to %s", csv_path)
    df.to_csv(csv_path, index=False, chunksize=chunksize)
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/memory_cache.py#L74-L87
write csv
python
def write_csv(fileobj, rows, encoding=ENCODING, dialect=DIALECT):
    """Dump rows to ``fileobj`` with the given ``encoding`` and CSV ``dialect``."""
    csvwriter = csv.writer(fileobj, dialect=dialect)
    csv_writerows(csvwriter, rows, encoding)
https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/export.py#L21-L24
write csv
python
def csv(ctx, dst):
    """
    Flatten a coverage file into a CSV
    of source_context, testname
    """
    sm = Smother.load(ctx.obj['report'])
    semantic = ctx.obj['semantic']
    writer = _csv.writer(dst, lineterminator='\n')
    dst.write("source_context, test_context\n")
    writer.writerows(sm.iter_records(semantic=semantic))
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L100-L109
write csv
python
def _write_cvvr(self, f, data):
    '''
    Write compressed "data" variable to the end of the file in a CVVR
    '''
    f.seek(0, 2)
    byte_loc = f.tell()
    cSize = len(data)
    block_size = CDF.CVVR_BASE_SIZE64 + cSize
    section_type = CDF.CVVR_
    rfuA = 0
    cvvr1 = bytearray(24)
    cvvr1[0:8] = struct.pack('>q', block_size)
    cvvr1[8:12] = struct.pack('>i', section_type)
    cvvr1[12:16] = struct.pack('>i', rfuA)
    cvvr1[16:24] = struct.pack('>q', cSize)
    f.write(cvvr1)
    f.write(data)
    return byte_loc
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1987-L2006
write csv
python
def csv(self, fh):
    """Writes the report data to `fh` in CSV format and returns it.

    :param fh: file to write data to
    :type fh: file object
    :rtype: file object

    """
    self._stats.to_csv(fh, encoding='utf-8', index=False)
    return fh
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/statistics_report.py#L19-L28
write csv
python
def write(self, iterable):
    """Writes values from iterable into CSV file"""

    io_error_text = _("Error writing to file {filepath}.")
    io_error_text = io_error_text.format(filepath=self.path)

    try:
        with open(self.path, "wb") as csvfile:
            csv_writer = csv.writer(csvfile, self.dialect)
            for line in iterable:
                csv_writer.writerow(
                    list(encode_gen(line, encoding=self.encoding)))
    except IOError:
        txt = \
            _("Error opening file {filepath}.").format(filepath=self.path)
        try:
            post_command_event(self.main_window, self.StatusBarMsg, text=txt)
        except TypeError:
            # The main window does not exist any more
            pass
        return False
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/__csv.py#L434-L459
write csv
python
def to_csv(self, path, mode=WRITE_MODE, dialect='excel', compression=None,
           newline='', **fmtparams):
    """
    Saves the sequence to a csv file. Each element should be an iterable
    which will be expanded to the elements of each row.

    :param path: path to write file
    :param mode: file open mode
    :param dialect: passed to csv.writer
    :param fmtparams: passed to csv.writer
    """
    if 'b' in mode:
        newline = None
    with universal_write_open(path, mode=mode, compression=compression,
                              newline=newline) as output:
        csv_writer = csv.writer(output, dialect=dialect, **fmtparams)
        for row in self:
            csv_writer.writerow([six.u(str(element)) for element in row])
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/pipeline.py#L1525-L1544
write csv
python
def write_csv_header(mol, csv_writer):
    """
    Write the csv header
    """
    # create line list where line elements for writing will be stored
    line = []

    # ID
    line.append('id')

    # status
    line.append('status')

    # query labels
    queryList = mol.properties.keys()
    for queryLabel in queryList:
        line.append(queryLabel)

    # write line
    csv_writer.writerow(line)
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L37-L57
write csv
python
def csv(self, output):
    """Output data as excel-compatible CSV"""
    import csv
    csvwriter = csv.writer(self.outfile)
    csvwriter.writerows(output)
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/cli/__init__.py#L75-L79
write csv
python
def write_csv_files(args, infilenames, outfilename):
    """Write csv file(s) to disk.

    Keyword arguments:
    args -- program arguments (dict)
    infilenames -- names of user-inputted and/or downloaded files (list)
    outfilename -- name of output text file (str)
    """
    def csv_convert(line):
        """Strip punctuation and insert commas"""
        clean_line = []
        for word in line.split(' '):
            clean_line.append(word.strip(string.punctuation))
        return ', '.join(clean_line)

    if not outfilename.endswith('.csv'):
        outfilename = outfilename + '.csv'
    outfilename = overwrite_file_check(args, outfilename)

    all_text = []  # Text must be aggregated if writing to a single output file
    for i, infilename in enumerate(infilenames):
        parsed_text = get_parsed_text(args, infilename)
        if parsed_text:
            if args['multiple']:
                if not args['quiet']:
                    print('Attempting to write to {0}.'.format(outfilename))
                csv_text = [csv_convert(x) for x in parsed_text]
                print(csv_text)
                write_file(csv_text, outfilename)
            elif args['single']:
                all_text += parsed_text
                # Newline added between multiple files being aggregated
                if len(infilenames) > 1 and i < len(infilenames) - 1:
                    all_text.append('\n')

    # Write all text to a single output file
    if args['single'] and all_text:
        if not args['quiet']:
            print('Attempting to write {0} page(s) to {1}.'
                  .format(len(infilenames), outfilename))
        csv_text = [csv_convert(x) for x in all_text]
        print(csv_text)
        write_file(csv_text, outfilename)
https://github.com/huntrar/scrape/blob/bf877f6da5df3ed0f2bea60a95acf7df63c88002/scrape/utils.py#L578-L622
write csv
python
def _write_data_csv(csv_data):
    """
    CSV data has been parsed by this point, so take it and write it file by file.
    :return:
    """
    logger_excel.info("enter write_data_csv")

    # Loop for each file and data that is stored
    for file in csv_data:
        for filename, data in file.items():
            # Make sure we're working with the right data types before trying to open and write a file
            if isinstance(filename, str) and isinstance(data, list):
                try:
                    with open(filename, 'w+') as f:
                        w = csv.writer(f)
                        for line in data:
                            w.writerow(line)
                except Exception:
                    logger_excel.debug("write_data_csv: Unable to open/write file: {}".format(filename))
    logger_excel.info("exit write_data_csv")
    return
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/excel.py#L1107-L1127
write csv
python
def write_csv_to_file(d):
    """
    Writes columns of data to a target CSV file.

    :param dict d: A dictionary containing one list for every data column. Keys: int, Values: list
    :return None:
    """
    logger_csvs.info("enter write_csv_to_file")

    try:
        for filename, data in d.items():
            try:
                l_columns = _reorder_csv(data, filename)
                rows = zip(*l_columns)
                with open(filename, 'w+') as f:
                    w = csv.writer(f)
                    for row in rows:
                        row2 = decimal_precision(row)
                        w.writerow(row2)
            except TypeError as e:
                print("Error: Unable to write values to CSV file, {}:\n"
                      "(1) The data table may have 2 or more identical variables. Please correct the LiPD file manually\n"
                      "(2) There may have been an error trying to prep the values for file write. The 'number' field in the data columns may be a 'string' instead of an 'integer' data type".format(filename))
                print(e)
            except Exception as e:
                print("Error: CSV file not written, {}, {}:\n"
                      "The data table may have 2 or more identical variables. Please correct the LiPD file manually".format(filename, e))
    except AttributeError as e:
        logger_csvs.error("write_csv_to_file: Unable to write CSV File: {}".format(e, exc_info=True))
    logger_csvs.info("exit write_csv_to_file")
    return
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L247-L277
write csv
python
def write_csv(self, path=None):
    """Write CSV file. Sorts the sections before calling the superclass write_csv"""

    # Sort the Sections
    self.sort_sections(['Root', 'Contacts', 'Documentation', 'References',
                        'Resources', 'Citations', 'Schema'])

    # Sort Terms in the root section

    # Re-wrap the description and abstract
    if self.description:
        self.description = self.description

    if self.abstract:
        self.description = self.abstract

    t = self['Root'].get_or_new_term('Root.Modified')
    t.value = datetime_now()

    self.sort_by_term()

    return super().write_csv(str(path))
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L283-L304
write csv
python
def writer(f):
    '''CSV writer factory for CADA format'''
    return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L35-L37
write csv
python
def write_csvs(dirname: PathLike, adata: AnnData, skip_data: bool = True, sep: str = ','): """See :meth:`~anndata.AnnData.write_csvs`. """ dirname = Path(dirname) if dirname.suffix == '.csv': dirname = dirname.with_suffix('') logger.info("writing '.csv' files to %s", dirname) if not dirname.is_dir(): dirname.mkdir(parents=True, exist_ok=True) dir_uns = dirname / 'uns' if not dir_uns.is_dir(): dir_uns.mkdir(parents=True, exist_ok=True) d = dict( obs=adata._obs, var=adata._var, obsm=adata._obsm.to_df(), varm=adata._varm.to_df(), ) if not skip_data: d['X'] = pd.DataFrame( adata._X.toarray() if issparse(adata._X) else adata._X) d_write = {**d, **adata._uns} not_yet_raised_sparse_warning = True for key, value in d_write.items(): if issparse(value): if not_yet_raised_sparse_warning: warnings.warn('Omitting to write sparse annotation.') not_yet_raised_sparse_warning = False continue filename = dirname if key not in {'X', 'var', 'obs', 'obsm', 'varm'}: filename = dir_uns filename /= '{}.csv'.format(key) df = value if not isinstance(value, pd.DataFrame): value = np.array(value) if np.ndim(value) == 0: value = value[None] try: df = pd.DataFrame(value) except Exception as e: warnings.warn('Omitting to write {!r}.'.format(key), type(e)) continue df.to_csv( filename, sep=sep, header=key in {'obs', 'var', 'obsm', 'varm'}, index=key in {'obs', 'var'}, )
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/write.py#L20-L67
write csv
python
def write_csv(path, rows, dialect='excel', fieldnames=None, quoting=csv.QUOTE_ALL,
              extrasaction='ignore', *args, **kwargs):
    ''' Write rows data to a CSV file (with or without fieldnames) '''
    if not quoting:
        quoting = csv.QUOTE_MINIMAL
    if 'lineterminator' not in kwargs:
        kwargs['lineterminator'] = '\n'  # use \n to fix double-line in Windows
    with open(path, mode='wt', newline='') as csvfile:
        if fieldnames:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect=dialect,
                                    quoting=quoting, extrasaction=extrasaction,
                                    *args, **kwargs)
            writer.writeheader()
            for row in rows:
                writer.writerow(row)
        else:
            writer = csv.writer(csvfile, dialect=dialect, quoting=quoting,
                                *args, **kwargs)
            for row in rows:
                writer.writerow(row)
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L177-L192
write csv
python
def write(self, file_or_filename):
    """ Writes case data as CSV.
    """
    if isinstance(file_or_filename, basestring):
        file = open(file_or_filename, "wb")
    else:
        file = file_or_filename

    self.writer = csv.writer(file)

    super(CSVWriter, self).write(file)
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/excel.py#L112-L122
write csv
python
def write_data_csv(fname, frames, preproc):
    """Write data to csv file"""
    fdata = open(fname, "w")
    dr = Parallel()(delayed(get_data)(lst, preproc) for lst in frames)
    data, result = zip(*dr)
    for entry in data:
        fdata.write(','.join(entry)+'\r\n')
    print("All finished, %d slices in total" % len(data))
    fdata.close()
    result = np.ravel(result)
    return result
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/kaggle-ndsb2/Preprocessing.py#L94-L104
write csv
python
def write_csv(path, data):
    """This function writes comma-separated <data> to <path>. Parameter
    <path> is either a pathname or a file-like object that supports the
    |write()| method."""
    fd = _try_open_file(path, 'w',
                        'The first argument must be a pathname or an object that supports write() method')
    for v in data:
        fd.write(",".join([str(x) for x in v]))
        fd.write("\n")
    _try_close_file(fd, path)
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/chart_data.py#L258-L268
write csv
python
def write_to_csv(self, filename=None, extension='.dat', overwrite=False, stride=1, chunksize=None, **kw): """ write all data to csv with numpy.savetxt Parameters ---------- filename : str, optional filename string, which may contain placeholders {itraj} and {stride}: * itraj will be replaced by trajetory index * stride is stride argument of this method If filename is not given, it is being tried to obtain the filenames from the data source of this iterator. extension : str, optional, default='.dat' filename extension of created files overwrite : bool, optional, default=False shall existing files be overwritten? If a file exists, this method will raise. stride : int omit every n'th frame chunksize: int, default=None how many frames to process at once kw : dict, optional named arguments passed into numpy.savetxt (header, seperator etc.) Example ------- Assume you want to save features calculated by some FeatureReader to ASCII: >>> import numpy as np, pyemma >>> import os >>> from pyemma.util.files import TemporaryDirectory >>> from pyemma.util.contexts import settings >>> data = [np.random.random((10,3))] * 3 >>> reader = pyemma.coordinates.source(data) >>> filename = "distances_{itraj}.dat" >>> with TemporaryDirectory() as td, settings(show_progress_bars=False): ... out = os.path.join(td, filename) ... reader.write_to_csv(out, header='', delimiter=';') ... print(sorted(os.listdir(td))) ['distances_0.dat', 'distances_1.dat', 'distances_2.dat'] """ import os if not filename: assert hasattr(self, 'filenames') # raise RuntimeError("could not determine filenames") filenames = [] for f in self.filenames: base, _ = os.path.splitext(f) filenames.append(base + extension) elif isinstance(filename, str): filename = filename.replace('{stride}', str(stride)) filenames = [filename.replace('{itraj}', str(itraj)) for itraj in range(self.number_of_trajectories())] else: raise TypeError("filename should be str or None") self.logger.debug("write_to_csv, filenames=%s" % filenames) # check files before starting to write import errno for f in filenames: try: st = os.stat(f) raise OSError(errno.EEXIST) except OSError as e: if e.errno == errno.EEXIST: if overwrite: continue elif e.errno == errno.ENOENT: continue raise f = None from pyemma._base.progress import ProgressReporter pg = ProgressReporter() it = self.iterator(stride, chunk=chunksize, return_trajindex=False) pg.register(it.n_chunks, "saving to csv") with it, pg.context(): oldtraj = -1 for X in it: if oldtraj != it.current_trajindex: if f is not None: f.close() fn = filenames[it.current_trajindex] self.logger.debug("opening file %s for writing csv." % fn) f = open(fn, 'wb') oldtraj = it.current_trajindex np.savetxt(f, X, **kw) f.flush() pg.update(1, 0) if f is not None: f.close()
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/_base/datasource.py#L519-L608
write csv
python
def writeToCSV(self, output_path=None):
    '''writeToCSV - write the telemetry dictionary to csv
    '''
    header = ['Name', 'First Byte', 'Last Byte', 'Bit Mask', 'Endian',
              'Type', 'Description', 'Values']

    if output_path is None:
        output_path = ait.config._directory

    for pkt_name in self.tlmdict:
        filename = os.path.join(output_path, pkt_name + '.csv')

        with open(filename, 'wb') as output:
            csvwriter = csv.writer(output, quoting=csv.QUOTE_ALL)
            csvwriter.writerow(header)

            for fld in self.tlmdict[pkt_name].fields:
                # Pre-process some fields

                # Description
                desc = fld.desc.replace('\n', ' ') if fld.desc is not None else ""

                # Mask
                mask = hex(fld.mask) if fld.mask is not None else ""

                # Enumerations
                enums = '\n'.join("%s: %s" % (k, fld.enum[k])
                                  for k in fld.enum) if fld.enum is not None else ""

                # Set row
                row = [fld.name, fld.slice().start, fld.slice().stop,
                       mask, fld.type.endian, fld.type.name, desc, enums]

                csvwriter.writerow(row)
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L1076-L1109
write csv
python
def write_text_rows(writer, rows):
    '''Write CSV row data which may include text.'''
    for row in rows:
        try:
            writer.writerow(row)
        except UnicodeEncodeError:
            # Python 2 csv does badly with unicode outside of ASCII
            new_row = []
            for item in row:
                if isinstance(item, text_type):
                    new_row.append(item.encode('utf-8'))
                else:
                    new_row.append(item)
            writer.writerow(new_row)
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/compat.py#L91-L104
write csv
python
def write(self, data, method='w'):
    """
    Export data to CSV file.

    :param data: Either a list of tuples or a list of lists.
    :param method: File opening method.
    """
    # Create list of lists from flat list
    data = data if isinstance(data[0], (list, set, tuple)) else [[d] for d in data]

    # Open file and write rows
    with open(self.file_path, method) as write:
        wr = csv_builtin.writer(write)
        wr.writerows(data)
    return self.file_path
https://github.com/mrstephenneal/databasetools/blob/e515c568e80fe990c192eb7df0094ad2f474ee67/databasetools/csv.py#L10-L24
write csv
python
def csv(self, text=TEXT, sep=',', index=True, float_fmt="%.2g"):
    """ Generate a CSV table from the table data. """
    return self._data.to_csv(sep=sep, index=index, float_format=float_fmt)
https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/report/__init__.py#L191-L193
write csv
python
def write(self):
    """Write csv file of resolved names and txt file of unresolved names.
    """
    csv_file = os.path.join(self.outdir, 'search_results.csv')
    txt_file = os.path.join(self.outdir, 'unresolved.txt')
    headers = self.key_terms
    unresolved = []
    with open(csv_file, 'w') as file:
        writer = csv.writer(file)
        writer.writerow(headers)
        for key in list(self._store.keys()):
            results = self._store[key]
            if len(results) == 0:
                unresolved.append(key)
            else:
                row = [key]
                for key_term in headers[1:]:
                    element = results[0][key_term]
                    # GNR returns UTF-8, csv requires ascii
                    #
                    # *** Note ***
                    # According to all docs for csv versions >= 2.6, csv
                    # can handle either UTF-8 or ascii, just not Unicode.
                    # In py3, the following two lines result in csv printing
                    # the element with a bitstring. If GNR is actually
                    # returning UTF-8, it seems easiest to just drop these
                    # if 'encode' in dir(element):
                    #     element = element.encode('ascii')
                    row.append(element)
                writer.writerow(row)
    if len(unresolved) > 0:
        with open(txt_file, 'w') as file:
            for name in unresolved:
                file.write("{0}\n".format(name))
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L224-L258
write csv
python
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None, header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None, timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None): r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If an empty string is set, it uses ``u0000`` (null character). :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\`` :param escapeQuotes: a flag indicating whether values containing quotes should always be enclosed in quotes. If None is set, it uses the default value ``true``, escaping all values containing a quote character. :param quoteAll: a flag indicating whether all values should always be enclosed in quotes. If None is set, it uses the default value ``false``, only escaping values containing a quote character. :param header: writes the names of columns as the first line. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param encoding: sets the encoding (charset) of saved csv files. If None is set, the default UTF-8 charset will be used. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, ``""``. 
:param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. Maximum length is 1 character. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header, nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll, dateFormat=dateFormat, timestampFormat=timestampFormat, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, encoding=encoding, emptyValue=emptyValue, lineSep=lineSep) self._jwrite.csv(path)
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L874-L945
write csv
python
def make_csv(self): """ Get the text representation of a report element as csv. """ import csv try: from StringIO import StringIO # Python 2.7 except ImportError: from io import StringIO out = StringIO() writer = csv.writer(out, delimiter='|', lineterminator='\n', quoting=csv.QUOTE_MINIMAL) if self.function == 'total': writer.writerows(self.results) elif self.function == 'top': rows = [['Value', self.headers.strip('"')]] if self.results[0] is not None: for res in self.results: if res is not None: rows.append(tuple([res[0], ','.join(res[1])])) writer.writerows(rows) elif self.function == 'table': rows = [[header.strip('"') for header in re.split('\s*,\s*', self.headers)]] for res in sorted(self.results, key=lambda x: x[0]): row = list(res[:-1]) lastcol = get_fmt_results(res[-1], limit=10) if lastcol[-1][0] == '[' and lastcol[-1][-1] == ']': row.append(u'{0} {1}'.format(u', '.join(lastcol[:-1]), lastcol[-1])) else: row.append(u', '.join(lastcol)) rows.append(row) writer.writerows(rows) self.csv = out.getvalue()
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L360-L397
write csv
python
def csv_to_obj(cls, file_path=None, text='', columns=None, remove_empty_rows=True, key_on=None, deliminator=',', eval_cells=True): """ This will convert a csv file or csv text into a seaborn table and return it :param file_path: str of the path to the file :param text: str of the csv text :param columns: list of str of columns to use :param remove_empty_rows: bool if True will remove empty rows which can happen in non-trimmed file :param key_on: list of str of columns to key on :param deliminator: str to use as a deliminator, defaults to , :param eval_cells: bool if True will try to evaluate numbers :return: SeabornTable """ lines = cls._get_lines(file_path, text, replace=u'\ufeff') for i in range(len(lines)): lines[i] = lines[i].replace('\r', '\n') lines[i] = lines[i].replace('\\r', '\r').split(',') data = cls._merge_quoted_cells(lines, deliminator, remove_empty_rows, eval_cells) row_columns = data[0] if len(row_columns) != len(set(row_columns)): # make unique for i, col in enumerate(row_columns): count = row_columns[:i].count(col) row_columns[i] = '%s_%s' % (col, count) if count else col return cls.list_to_obj(data[1:], columns=columns, row_columns=row_columns, key_on=key_on)
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L244-L273
write csv
python
def save_csv(self, csv_location):
    # type: (str) -> None
    """ Save the csv to a file """
    with open(csv_location, 'w') as csv_handle:
        writer = csv.writer(csv_handle)
        for row in self.csv_data:
            writer.writerow(row)
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L92-L97
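A side note on the pattern above (not part of the csv-ical source): on Python 3 the file is normally opened with newline='' so the csv module controls line endings itself. A minimal, self-contained sketch of the same row-by-row write, with hypothetical sample data and file name:
import csv

rows = [["name", "start"], ["standup", "2019-01-07 09:00"]]  # hypothetical sample data

# newline='' lets csv.writer emit its own line terminators without translation
with open("events.csv", "w", newline="", encoding="utf-8") as handle:
    writer = csv.writer(handle)
    for row in rows:
        writer.writerow(row)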
write csv
python
def to_csv(self, filename: str, latexify_names: bool = False):
    """
    Exports PDEntries to a csv

    Args:
        filename: Filename to write to.
        entries: PDEntries to export.
        latexify_names: Format entry names to be LaTeX compatible,
            e.g., Li_{2}O
    """
    elements = set()
    for entry in self.entries:
        elements.update(entry.composition.elements)
    elements = sorted(list(elements), key=lambda a: a.X)
    writer = csv.writer(open(filename, "w"),
                        delimiter=unicode2str(","),
                        quotechar=unicode2str("\""),
                        quoting=csv.QUOTE_MINIMAL)
    writer.writerow(["Name"] + elements + ["Energy"])
    for entry in self.entries:
        row = [entry.name if not latexify_names
               else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
        row.extend([entry.composition[el] for el in elements])
        row.append(entry.energy)
        writer.writerow(row)
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/entries/entry_tools.py#L232-L256
write csv
python
def _write_vvr(self, f, data):
    '''
    Writes a vvr to the end of file "f" with the byte stream "data".
    '''
    f.seek(0, 2)
    byte_loc = f.tell()
    block_size = CDF.VVR_BASE_SIZE64 + len(data)
    section_type = CDF.VVR_
    vvr1 = bytearray(12)
    vvr1[0:8] = struct.pack('>q', block_size)
    vvr1[8:12] = struct.pack('>i', section_type)
    f.write(vvr1)
    f.write(data)
    return byte_loc
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1948-L1963
write csv
python
def to_csv(data, field_names=None, filename='data.csv', overwrite=True, write_headers=True, append=False, flat=True, primary_fields=None, sort_fields=True): """ DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list column names :param filename: The name of the file :param overwrite: Overwrite the file if exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None """ # Don't overwrite if not specified if not overwrite and path.isfile(filename): raise FileExistsError('The file already exists') # Replace file if append not specified write_type = 'w' if not append else 'a' # Flatten if flat is specified, or there are no predefined field names if flat or not field_names: data = [flatten(datum) for datum in data] # Fill in gaps between dicts with empty string if not field_names: field_names, data = fill_gaps(data) # Sort fields if specified if sort_fields: field_names.sort() # If there are primary fields, move the field names to the front and sort # based on first field if primary_fields: for key in primary_fields[::-1]: field_names.insert(0, field_names.pop(field_names.index(key))) data = sorted(data, key=lambda k: k[field_names[0]], reverse=True) # Write the file with open(filename, write_type, encoding='utf-8') as f: writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='\n') if not append or write_headers: writer.writeheader() # Write rows containing fields in field names for datum in data: for key in list(datum.keys()): if key not in field_names: del datum[key] elif type(datum[key]) is str: datum[key] = datum[key].strip() datum[key] = str(datum[key]) writer.writerow(datum)
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L132-L194
write csv
python
def write(self, fname, append=True): """ Write detection to csv formatted file. Will append if append==True and file exists :type fname: str :param fname: Full path to file to open and write to. :type append: bool :param append: Set to true to append to an existing file, if True \ and file doesn't exist, will create new file and warn. If False will overwrite old files. """ mode = 'w' if append and os.path.isfile(fname): mode = 'a' header = '; '.join(['Template name', 'Detection time (UTC)', 'Number of channels', 'Channel list', 'Detection value', 'Threshold', 'Threshold type', 'Input threshold', 'Detection type']) print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format( self.template_name, self.detect_time, self.no_chans, self.chans, self.detect_val, self.threshold, self.threshold_type, self.threshold_input, self.typeofdet) with open(fname, mode) as _f: _f.write(header + '\n') # Write a header for the file _f.write(print_str)
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3139-L3166
write csv
python
def write_csv(filename, header, data=None, rows=None, mode="w"): """Write the data to the specified filename Usage ----- >>> write_csv(filename, header, data, mode=mode) Parameters ---------- filename : str The name of the file header : list of strings The names of the columns (or fields): (fieldname1, fieldname2, ...) data : list of dictionaries (optional) [ {fieldname1: a1, fieldname2: a2}, {fieldname1: b1, fieldname2: b2}, ... ] rows : list of lists (optional) [ (a1, a2), (b1, b2), ... ] mode : str (optional) "w": write the data to the file by overwriting it "a": write the data to the file by appending them Returns ------- None. A CSV file is written. """ if data == rows == None: msg = "You must specify either data or rows" raise ValueError(msg) elif data != None and rows != None: msg = "You must specify either data or rows. Not both" raise ValueError(msg) data_header = dict((x, x) for x in header) with open(filename, mode) as f: if data: writer = csv.DictWriter(f, fieldnames=header) if mode == "w": writer.writerow(data_header) writer.writerows(data) elif rows: writer = csv.writer(f) if mode == "w": writer.writerow(header) writer.writerows(rows) print "Saved %s." % filename
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L91-L152
write csv
python
def writerow(self, row):
    """Write row to the csv file. Any unicode strings in row are encoded as
    utf-8."""
    encoded_row = []
    for s in row:
        if isinstance(s, unicode):
            encoded_row.append(s.encode("utf-8"))
        else:
            encoded_row.append(s)
    try:
        self.writer.writerow(encoded_row)
    except Exception as e:
        print('error writing %s as %s' % (row, encoded_row))
        raise e
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/util.py#L549-L562
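The wrapper above targets Python 2, where the csv module cannot handle unicode directly. Under Python 3 the same effect is usually achieved by opening the file in text mode with an explicit encoding; a small sketch (file name and data are illustrative):
import csv

rows = [["stop_id", "stop_name"], ["8503000", "Zürich HB"]]  # illustrative data

# Python 3: csv works with str, so the encoding is handled once at the file level
with open("stops.csv", "w", newline="", encoding="utf-8") as handle:
    writer = csv.writer(handle)
    writer.writerows(rows)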
write csv
python
def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior='warn'): """Read CSVs as DataFrames from our asset map. Parameters ---------- asset_map : dict[int -> str] A mapping from asset id to file path with the CSV data for that asset show_progress : bool Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is encountered that is outside the range of a uint32. """ read = partial( read_csv, parse_dates=['day'], index_col='day', dtype=self._csv_dtypes, ) return self.write( ((asset, read(path)) for asset, path in iteritems(asset_map)), assets=viewkeys(asset_map), show_progress=show_progress, invalid_data_behavior=invalid_data_behavior, )
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L209-L237
write csv
python
def csv(self): """ Returns the security rules as a CSV. CSV format: - id - name - description - rules_direction - rules_ip_protocol - rules_from_port - rules_to_port - rules_grants_group_id - rules_grants_name - rules_grants_cidr_ip - rules_description Returns: str """ # Generate a csv file in memory with all the data in output = StringIO.StringIO() fieldnames = ['id', 'name', 'description', 'rules_direction', 'rules_ip_protocol', 'rules_from_port', 'rules_to_port', 'rules_grants_group_id', 'rules_grants_name', 'rules_grants_cidr_ip', 'rules_description'] writer = csv.DictWriter(output, fieldnames=fieldnames) writer.writeheader() for fr in self.rules: writer.writerow(fr.as_dict()) # Get the CSV in a string csv_content = output.getvalue() # Removing some useless newline at the end stripped_csv_content = csv_content.strip() return stripped_csv_content
https://github.com/percolate/ec2-security-groups-dumper/blob/f2a40d1b3802211c85767fe74281617e4dbae543/ec2_security_groups_dumper/main.py#L232-L276
write csv
python
def teecsv(table, source=None, encoding=None, errors='strict', write_header=True, **csvargs): """ Returns a table that writes rows to a CSV file as they are iterated over. """ source = write_source_from_arg(source) csvargs.setdefault('dialect', 'excel') return teecsv_impl(table, source=source, encoding=encoding, errors=errors, write_header=write_header, **csvargs)
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/csv.py#L169-L180
write csv
python
def _write_int(fname, data, append=True):
    """Write data to CSV file with validation."""
    # pylint: disable=W0705
    data_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
    fos_ex = pexdoc.exh.addex(
        OSError, "File *[fname]* could not be created: *[reason]*"
    )
    data_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
    try:
        pmisc.make_dir(fname)
        mode = "w" if append is False else "a"
        if sys.hexversion < 0x03000000:  # pragma: no cover, no branch
            with open(fname, mode) as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
        else:  # pragma: no cover
            with open(fname, mode, newline="") as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
    except (IOError, OSError) as eobj:
        fos_ex(True, _MF("fname", fname, "reason", eobj.strerror))
https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/write.py#L37-L55
write csv
python
def _write_csv(f, table_): """Write the given table (list of dicts) to the given file as CSV. Writes UTF8-encoded, CSV-formatted text. ``f`` could be an opened file, sys.stdout, or a StringIO. """ fieldnames = table_[0].keys() set_fieldname = set(table_[0].keys()) # go through all the fields and find all the field names for row in table_: set_fieldname.update(set(row.keys())) # append the additonal fields sorted onto the end additional_fields = sorted(set_fieldname - set(table_[0].keys())) fieldnames += additional_fields writer = unicodecsv.DictWriter(f, fieldnames, encoding='utf-8') writer.writeheader() # Change lists into comma-separated strings. for dict_ in table_: for key, value in dict_.items(): if type(value) in (list, tuple): dict_[key] = ', '.join([unicode(v) for v in value]) writer.writerows(table_)
https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/losser.py#L46-L73
write csv
python
def WriteVcard(filename, vcard, fopen=codecs.open):
    """Writes a vCard into the given filename."""
    if os.access(filename, os.F_OK):
        logger.warning('File exists at "{}", skipping.'.format(filename))
        return False
    try:
        with fopen(filename, 'w', encoding='utf-8') as f:
            logger.debug('Writing {}:\n{}'.format(filename, u(vcard.serialize())))
            f.write(u(vcard.serialize()))
    except OSError:
        logger.error('Error writing to file "{}", skipping.'.format(filename))
        return False
    return True
https://github.com/dmwilcox/vcard-tools/blob/1b0f62a0f4c128c7a212ecdca34ff2acb746b262/vcardtools/vcf_splitter.py#L138-L150
write csv
python
def write_csv_line(mol, csv_writer, options):
    """
    Parse mol object and write a line to the csv file
    """
    # set variables
    status_field = options.status_field

    # elements for writing will be stored in the line list
    line = []

    # ID
    id = mol.GetProp('id')
    if id is not None:
        line.append(id)
    else:
        line.append('n/a')

    # status
    line.append(mol.GetProp(status_field))

    # query labels
    queryList = mol.properties.keys()
    for queryLabel in queryList:
        line.append(mol.properties[queryLabel])

    # write line
    csv_writer.writerow(line)
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L60-L87
write csv
python
def rec2csv(r, filename):
    """Export a recarray *r* to a CSV file *filename*"""
    names = r.dtype.names
    def translate(x):
        # .lower() must be called for the string comparison to work
        if x is None or str(x).lower() == "none":
            x = ""
        return str(x)
    with open(filename, "w") as csv:
        csv.write(",".join([str(x) for x in names])+"\n")
        for data in r:
            csv.write(",".join([translate(x) for x in data])+"\n")
    #print "Wrote CSV table %r" % filename
    return filename
https://github.com/orbeckst/RecSQL/blob/6acbf821022361719391697c9c2f0822f9f8022a/recsql/export.py#L24-L36
write csv
python
def _ReadCSV(self, file_name, cols, required, deprecated): """Reads lines from file_name, yielding a list of unicode values corresponding to the column names in cols.""" contents = self._GetUtf8Contents(file_name) if not contents: return eol_checker = util.EndOfLineChecker(StringIO.StringIO(contents), file_name, self._problems) reader = csv.reader(eol_checker) # Use excel dialect header = next(reader) header = map(lambda x: x.strip(), header) # trim any whitespace header_occurrences = util.defaultdict(lambda: 0) for column_header in header: header_occurrences[column_header] += 1 for name, count in header_occurrences.items(): if count > 1: self._problems.DuplicateColumn( header=name, file_name=file_name, count=count) # check for unrecognized columns, which are often misspellings header_context = (file_name, 1, [''] * len(header), header) valid_cols = cols + [deprecated_name for (deprecated_name, _) in deprecated] unknown_cols = set(header).difference(set(valid_cols)) for col in unknown_cols: # this is provided in order to create a nice colored list of # columns in the validator output self._problems.UnrecognizedColumn(file_name, col, header_context) # check for missing required columns col_index = [-1] * len(cols) for i in range(len(cols)): if cols[i] in header: col_index[i] = header.index(cols[i]) elif cols[i] in required: self._problems.MissingColumn(file_name, cols[i], header_context) # check for deprecated columns for (deprecated_name, new_name) in deprecated: if deprecated_name in header: self._problems.DeprecatedColumn(file_name, deprecated_name, new_name, header_context) row_num = 1 for row in reader: row_num += 1 if len(row) == 0: # skip extra empty lines in file continue if len(row) > len(header): self._problems.OtherProblem('Found too many cells (commas) in line ' '%d of file "%s". Every row in the file ' 'should have the same number of cells as ' 'the header (first line) does.' % (row_num, file_name), (file_name, row_num), type=problems.TYPE_WARNING) if len(row) < len(header): self._problems.OtherProblem('Found missing cells (commas) in line ' '%d of file "%s". Every row in the file ' 'should have the same number of cells as ' 'the header (first line) does.' % (row_num, file_name), (file_name, row_num), type=problems.TYPE_WARNING) result = [None] * len(cols) unicode_error_columns = [] # A list of column numbers with an error for i in range(len(cols)): ci = col_index[i] if ci >= 0: if len(row) <= ci: # handle short CSV rows result[i] = u'' else: try: result[i] = row[ci].decode('utf-8').strip() except UnicodeDecodeError: # Replace all invalid characters with # REPLACEMENT CHARACTER (U+FFFD) result[i] = codecs.getdecoder("utf8")(row[ci], errors="replace")[0].strip() unicode_error_columns.append(i) for i in unicode_error_columns: self._problems.InvalidValue(cols[i], result[i], 'Unicode error', (file_name, row_num, result, cols)) yield (result, row_num, cols)
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/loader.py#L283-L373
write csv
python
def writeFromDict(dataDict, headers, csvFile):
    """
    Write dictionary to a CSV, where keys are row numbers and values are a list.
    """
    with open(csvFile, "wb") as f:
        writer = csv.writer(f, delimiter=",")
        writer.writerow(headers)
        for row in sorted(dataDict.keys()):
            writer.writerow(dataDict[row])
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/csv_helper.py#L155-L163
write csv
python
def csv(self, **kwargs):
    """
    Export this table as a CSV
    """
    out = StringIO()
    writer = csv.DictWriter(out, self.columns, **kwargs)
    writer.writerow(dict(zip(self.columns, self.columns)))
    writer.writerows(dict(row.items()) for row in self.rows)
    return out
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L260-L269
write csv
python
def _write_csv(filepath, data, kwargs):
    """See documentation of mpu.io.write."""
    kwargs_open = {'newline': ''}
    mode = 'w'
    if sys.version_info < (3, 0):
        kwargs_open.pop('newline', None)
        mode = 'wb'
    with open(filepath, mode, **kwargs_open) as fp:
        if 'delimiter' not in kwargs:
            kwargs['delimiter'] = ','
        if 'quotechar' not in kwargs:
            kwargs['quotechar'] = '"'
        with open(filepath, 'w') as fp:
            writer = csv.writer(fp, **kwargs)
            writer.writerows(data)
    return data
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/io.py#L174-L189
write csv
python
def vcf_writer(parser, keep, extract, args): """Writes the data in VCF format.""" # The output output = sys.stdout if args.output == "-" else open(args.output, "w") try: # Getting the samples samples = np.array(parser.get_samples(), dtype=str) k = _get_sample_select(samples=samples, keep=keep) # Writing the VCF header output.write(_VCF_HEADER.format( date=datetime.today().strftime("%Y%m%d"), version=__version__, samples="\t".join(samples[k]), )) # The data generator generator = _get_generator(parser=parser, extract=extract, keep=k, check_maf=args.maf) # The number of markers extracted nb_extracted = 0 for data in generator: # Keeping only the required genotypes genotypes = data.genotypes # Computing the alternative allele frequency af = np.nanmean(genotypes) / 2 print(data.variant.chrom, data.variant.pos, data.variant.name, data.reference, data.coded, ".", "PASS", "AF={}".format(af), "GT:DS", sep="\t", end="", file=output) for geno in genotypes: if np.isnan(geno): output.write("\t./.:.") else: rounded_geno = int(round(geno, 0)) output.write("\t{}:{}".format( _VCF_GT_MAP[rounded_geno], geno, )) output.write("\n") nb_extracted += 1 if nb_extracted == 0: logger.warning("No markers matched the extract list") finally: output.close()
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/extract/__main__.py#L133-L184
write csv
python
def write(self, handle: TextIO, blank_lines: int = 1) -> None: """Write this :class:`SampleSheet` to a file-like object. Args: handle: Object to wrap by csv.writer. blank_lines: Number of blank lines to write between sections. """ if not isinstance(blank_lines, int) or blank_lines <= 0: raise ValueError('Number of blank lines must be a positive int.') writer = csv.writer(handle) csv_width: int = max([len(self.all_sample_keys), 2]) section_order = ['Header', 'Reads'] + self._sections + ['Settings'] def pad_iterable( iterable: Iterable, size: int = csv_width, padding: str = '' ) -> List[str]: return list(islice(chain(iterable, repeat(padding)), size)) def write_blank_lines( writer: Any, n: int = blank_lines, width: int = csv_width ) -> None: for i in range(n): writer.writerow(pad_iterable([], width)) for title in section_order: writer.writerow(pad_iterable([f'[{title}]'], csv_width)) section = getattr(self, title) if title == 'Reads': for read in self.Reads: writer.writerow(pad_iterable([read], csv_width)) else: for key, value in section.items(): writer.writerow(pad_iterable([key, value], csv_width)) write_blank_lines(writer) writer.writerow(pad_iterable(['[Data]'], csv_width)) writer.writerow(pad_iterable(self.all_sample_keys, csv_width)) for sample in self.samples: line = [getattr(sample, key) for key in self.all_sample_keys] writer.writerow(pad_iterable(line, csv_width))
https://github.com/clintval/sample-sheet/blob/116ac6f26f6e61b57716c90f6e887d3d457756f3/sample_sheet/__init__.py#L865-L908
write csv
python
def create_csv(filename, csv_data, mode="w"):
    """
    Create a CSV file with the given data and store it in the
    file with the given name.

    :param filename: name of the file to store the data in
    :param csv_data: the data to be stored in the file
    :param mode: the mode in which we have to open the file. It can
                 be 'w', 'a', etc. Default is 'w'
    """
    with open(filename, mode) as f:
        csv_data.replace("_", r"\_")
        f.write(csv_data)
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/report.py#L58-L71
write csv
python
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- output_file: str or file object or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If `output_file` is None, the generated CSV output is returned as a string instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.) append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `output_file` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `output_file`. If `output_file` is given, writes the output to the file and returns `None`. If `output_file` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if fields_to_explode is not None: raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if output_file is None: file_or_string = io.StringIO() elif isinstance(output_file, str): mode = 'a' if append else 'w' file_or_string = open(output_file, mode) # ensure parent directory of output file exits dirname = os.path.dirname(os.path.abspath(output_file)) if not os.path.exists(dirname): logger.debug(f"Creating parent directory of output file '{output_file}'") os.makedirs(dirname) elif isinstance(output_file, io.IOBase): file_or_string = output_file else: raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})") retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: # TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up! # (Note that for regular file output we don't want to encode each line to a bytes # object because this seems to be ca. 2x slower). 
if isinstance(file_or_string, gzip.GzipFile): file_or_string.write(header_line.encode()) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line.encode()) else: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if output_file is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/item_list.py#L182-L280
write csv
python
def printcsv(csvdiffs):
    """print the csv"""
    for row in csvdiffs:
        print(','.join([str(cell) for cell in row]))
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/idfdiff.py#L137-L140
write csv
python
def gcs_write(self, log, remote_log_location, append=True): """ Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool """ if append: try: old_log = self.gcs_read(remote_log_location) log = '\n'.join([old_log, log]) if old_log else log except Exception as e: if not hasattr(e, 'resp') or e.resp.get('status') != '404': log = '*** Previous log discarded: {}\n\n'.format(str(e)) + log try: bkt, blob = self.parse_gcs_url(remote_log_location) from tempfile import NamedTemporaryFile with NamedTemporaryFile(mode='w+') as tmpfile: tmpfile.write(log) # Force the file to be flushed, since we're doing the # upload from within the file context (it hasn't been # closed). tmpfile.flush() self.hook.upload(bkt, blob, tmpfile.name) except Exception as e: self.log.error('Could not write logs to %s: %s', remote_log_location, e)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/gcs_task_handler.py#L132-L163
write csv
python
def _write_metadata(self):
    """ _write_metadata: Writes node metadata to csv file
        Args: None
        Returns: None
    """
    string_buffer = StringIO()
    writer = csv.writer(string_buffer, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['Path *', 'Title *', 'Source ID', 'Description', 'Author',
                     'Language', 'License ID *', 'License Description',
                     'Copyright Holder', 'Thumbnail'])
    for k in self.map:
        node = self.map[k]
        writer.writerow([k, node['title'], node['source_id'], node['description'],
                         node['author'], node['language'], node['license'],
                         node['license_description'], node['copyright_holder'],
                         node['thumbnail']])
    self.zf.writestr('Content.csv', string_buffer.getvalue())
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/data_writer.py#L84-L96
write csv
python
def write_dicts_to_csv(self, dicts):
    """Saves .csv file with posts data

    :param dicts: Dictionaries with same values
    """
    csv_headers = sorted(dicts[0].keys())
    with open(self.path, "w") as out_file:  # write to file
        dict_writer = csv.DictWriter(
            out_file, csv_headers, delimiter=",",
            quotechar="\""
        )
        dict_writer.writeheader()
        dict_writer.writerows(dicts)
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/save_as.py#L18-L29
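A self-contained sketch of the same DictWriter idea using only the standard library; the dictionaries and output path below are made up for illustration:
import csv

posts = [
    {"author": "alice", "likes": 3},
    {"author": "bob", "likes": 5},
]

headers = sorted(posts[0].keys())  # column order taken from the first dict
with open("posts.csv", "w", newline="") as out_file:
    writer = csv.DictWriter(out_file, headers, delimiter=",", quotechar='"')
    writer.writeheader()     # one header row
    writer.writerows(posts)  # one row per dict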
write csv
python
def write_text(self, text: str, *, encoding='utf-8', append=True):
    ''' write text into the file. '''
    mode = 'a' if append else 'w'
    return self.write(text, mode=mode, encoding=encoding)
https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L160-L163
write csv
python
def WriteRow(stream, values):
    "Writes one row of comma-separated values to stream."
    stream.write(','.join([EncodeForCSV(val) for val in values]))
    stream.write('\n')
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/misc/import_ch_zurich.py#L123-L126
write csv
python
def csv(args): """ %prog csv excelfile Convert EXCEL to csv file. """ from xlrd import open_workbook p = OptionParser(csv.__doc__) p.set_sep(sep=',') opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) excelfile, = args sep = opts.sep csvfile = excelfile.rsplit(".", 1)[0] + ".csv" wb = open_workbook(excelfile) fw = open(csvfile, "w") for s in wb.sheets(): print('Sheet:',s.name, file=sys.stderr) for row in range(s.nrows): values = [] for col in range(s.ncols): values.append(s.cell(row, col).value) print(sep.join(str(x) for x in values), file=fw)
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/excel.py#L162-L188
write csv
python
def write_text(self, name, s):
    """Write string data to cur_dir/name using write_file()."""
    buf = io.BytesIO(compat.to_bytes(s))
    self.write_file(name, buf)
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L315-L318
write csv
python
def save_csv(self, filename, write_header_separately=True): """ save the default array as a CSV file """ txt = '' #print("SAVING arr = ", self.arr) with open(filename, "w") as f: if write_header_separately: f.write(','.join([c for c in self.header]) + '\n') for row in self.arr: #print('save_csv: saving row = ', row) txt = ','.join([self.force_to_string(col) for col in row]) #print(txt) f.write(txt + '\n') f.write('\n')
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L241-L256
write csv
python
def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- filename: str or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If filename is None, the generated CSV output is returned instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `filename` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `filename`. If `filename` is given, writes the output to the file and returns `None`. If `filename` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if filename is not None: # ensure parent directory of output file exits dirname = os.path.dirname(os.path.abspath(filename)) if not os.path.exists(dirname): os.makedirs(dirname) file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO() retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if filename is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L132-L206
write csv
python
def csv_from_items(items, stream=None):
    """Write a list of items to stream in CSV format.

    The items need to be attrs-decorated.
    """
    items = iter(items)
    first = next(items)
    cls = first.__class__
    if stream is None:
        stream = sys.stdout
    fields = [f.name for f in attr.fields(cls)]
    writer = csv.DictWriter(stream, fieldnames=fields)
    writer.writeheader()
    writer.writerow(attr.asdict(first))
    writer.writerows((attr.asdict(x) for x in items))
https://github.com/numirias/firefed/blob/908114fe3a1506dcaafb23ce49e99f171e5e329d/firefed/feature/feature.py#L132-L146
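For reference, a minimal usage sketch of the attrs-based export above; the Point class and the call are invented for illustration, assume the attrs package is installed, and assume csv_from_items from the snippet above is in scope:
import sys
import attr

@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()

# csv_from_items reads the field names from the class of the first item,
# writes a header row, then one row per item
csv_from_items([Point(1, 2), Point(3, 4)], stream=sys.stdout)
# x,y
# 1,2
# 3,4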
write csv
python
def csv_format(csv_data, c_headers=None, r_headers=None, rows=None, **kwargs): """ Format csv rows parsed to Dict or Array """ result = None c_headers = [] if c_headers is None else c_headers r_headers = [] if r_headers is None else r_headers rows = [] if rows is None else rows result_format = kwargs.get('result_format', ARRAY_RAW_FORMAT) # DICT FORMAT if result_format == DICT_FORMAT: result = csv_dict_format(csv_data, c_headers, r_headers) # ARRAY_RAW_FORMAT elif result_format == ARRAY_RAW_FORMAT: result = rows # ARRAY_CLEAN_FORMAT elif result_format == ARRAY_CLEAN_FORMAT: result = csv_array_clean_format(csv_data, c_headers, r_headers) else: result = None # DEFAULT if result and result_format < DICT_FORMAT: result = [result] return result
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L385-L415
write csv
python
def format_csv(self, delim=',', qu='"'):
    """ Prepares the data in CSV format """
    res = qu + self.name + qu + delim
    if self.data:
        for d in self.data:
            res += qu + str(d) + qu + delim
    return res + '\n'
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L40-L48
write csv
python
def csv(self, sep=',', branches=None, include_labels=True, limit=None, stream=None): """ Print csv representation of tree only including branches of basic types (no objects, vectors, etc..) Parameters ---------- sep : str, optional (default=',') The delimiter used to separate columns branches : list, optional (default=None) Only include these branches in the CSV output. If None, then all basic types will be included. include_labels : bool, optional (default=True) Include a first row of branch names labelling each column. limit : int, optional (default=None) Only include up to a maximum of ``limit`` rows in the CSV. stream : file, (default=None) Stream to write the CSV output on. By default the CSV will be written to ``sys.stdout``. """ supported_types = (Scalar, Array, stl.string) if stream is None: stream = sys.stdout if not self._buffer: self.create_buffer(ignore_unsupported=True) if branches is None: branchdict = OrderedDict([ (name, self._buffer[name]) for name in self.iterbranchnames() if isinstance(self._buffer[name], supported_types)]) else: branchdict = OrderedDict() for name in branches: if not isinstance(self._buffer[name], supported_types): raise TypeError( "selected branch `{0}` " "is not a scalar or array type".format(name)) branchdict[name] = self._buffer[name] if not branchdict: raise RuntimeError( "no branches selected or no " "branches of scalar or array types exist") if include_labels: # expand array types to f[0],f[1],f[2],... print(sep.join( name if isinstance(value, (Scalar, BaseChar, stl.string)) else sep.join('{0}[{1:d}]'.format(name, idx) for idx in range(len(value))) for name, value in branchdict.items()), file=stream) # even though 'entry' is not used, enumerate or simply iterating over # self is required to update the buffer with the new branch values at # each tree entry. for i, entry in enumerate(self): line = [] for value in branchdict.values(): if isinstance(value, (Scalar, BaseChar)): token = str(value.value) elif isinstance(value, stl.string): token = str(value) else: token = sep.join(map(str, value)) line.append(token) print(sep.join(line), file=stream) if limit is not None and i + 1 == limit: break
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/tree.py#L511-L582
write csv
python
def _parsecsv(x):
    """Deserialize file-like object containing csv to a Python generator.
    """
    for line in x:
        # decode as utf-8, whitespace-strip and split on delimiter
        yield line.decode('utf-8').strip().split(config.DELIMITER)
https://github.com/gisgroup/statbank-python/blob/3678820d8da35f225d706ea5096c1f08bf0b9c68/statbank/request.py#L62-L67
write csv
python
def encode_csv(data_dict, column_names):
    """Builds a csv string.

    Args:
        data_dict: dict of {column_name: 1 value}
        column_names: list of column names

    Returns:
        A csv string version of data_dict
    """
    import csv
    import six

    values = [str(data_dict[x]) for x in column_names]
    str_buff = six.StringIO()
    writer = csv.writer(str_buff, lineterminator='')
    writer.writerow(values)
    return str_buff.getvalue()
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L373-L389
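The helper above renders exactly one row and quotes only when needed; a quick stdlib-only illustration of the same single-row encoding (function name and values are made up):
import csv
import io

def encode_row(values):
    """Encode one list of values as a single CSV line without a trailing newline."""
    buf = io.StringIO()
    csv.writer(buf, lineterminator='').writerow(values)
    return buf.getvalue()

print(encode_row(["1.5", "a,b", "plain"]))   # 1.5,"a,b",plain  (comma forces quoting)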
write csv
python
def _get_new_csv_writers(trans_title, meta_title,
                         trans_csv_path, meta_csv_path):
    """
    Prepare new csv writers, write title rows and return them.
    """
    trans_writer = UnicodeWriter(trans_csv_path)
    trans_writer.writerow(trans_title)

    meta_writer = UnicodeWriter(meta_csv_path)
    meta_writer.writerow(meta_title)

    return trans_writer, meta_writer
https://github.com/VorskiImagineering/C3PO/blob/e3e35835e5ac24158848afed4f905ca44ac3ae00/c3po/converters/po_csv.py#L32-L43
write csv
python
def from_csv(self, csv_source, delimiter=","): """ Set tabular attributes to the writer from a character-separated values (CSV) data source. Following attributes are set to the writer by the method: - :py:attr:`~.headers`. - :py:attr:`~.value_matrix`. :py:attr:`~.table_name` also be set if the CSV data source is a file. In that case, :py:attr:`~.table_name` is as same as the filename. :param str csv_source: Input CSV data source either can be designated CSV text or CSV file path. :Examples: :ref:`example-from-csv` :Dependency Packages: - `pytablereader <https://github.com/thombashi/pytablereader>`__ """ import pytablereader as ptr loader = ptr.CsvTableTextLoader(csv_source, quoting_flags=self._quoting_flags) loader.delimiter = delimiter try: for table_data in loader.load(): self.from_tabledata(table_data, is_overwrite_table_name=False) return except ptr.DataError: pass loader = ptr.CsvTableFileLoader(csv_source, quoting_flags=self._quoting_flags) loader.delimiter = delimiter for table_data in loader.load(): self.from_tabledata(table_data)
https://github.com/thombashi/pytablewriter/blob/52ea85ed8e89097afa64f137c6a1b3acdfefdbda/pytablewriter/writer/_table_writer.py#L577-L613
write csv
python
def saveSV(fname, X, comments=None, metadata=None, printmetadict=None, dialect=None, delimiter=None, doublequote=True, lineterminator='\n', escapechar = None, quoting=csv.QUOTE_MINIMAL, quotechar='"', skipinitialspace=False, stringifier=None, verbosity=DEFAULT_VERBOSITY): """ Save a tabarray to a separated-variable (CSV) file. **Parameters** **fname** : string Path to a separated variable (CSV) text file. **X** : tabarray The actual data in a :class:`tabular.tab.tabarray`. **comments** : string, optional The character to be used to denote the start of a header (non-data) line, e.g. '#'. If not specified, it is determined according to the following rule: '#' if `metadata` argument is set, otherwise ''. **delimiter** : string, optional The character to beused to separate values in each line of text, e.g. ','. If not specified, by default, this is inferred from the file extension: if the file ends in `.csv`, the delimiter is ',', otherwise it is '\\t.' **linebreak** : string, optional The string separating lines of text. By default, this is assumed to be '\\n', and can also be set to be '\\r' or '\\r\\n'. **metadata** : list of strings or Boolean, optional Allowed values are True, False, or any sublists of the list `['names', 'formats', 'types', 'coloring', 'dialect']`. These keys indicate what special metadata is printed in the header. * If a sublist of `['names', 'formats', 'types', 'coloring', 'dialect']`, then the indicated types of metadata are written out. * If `True`, this is the same as `metadata = ['coloring', 'types', 'names','dialect']`, e.g. as many types of metadata as this algorithm currently knows how to write out. * If 'False', no metadata is printed at all, e.g. just the data. * If `metadata` is not specified, the default is `['names']`, that is, just column names are written out. **printmetadict** : Boolean, optional Whether or not to print a string representation of the `metadatadict` in the first line of the header. If `printmetadict` is not specified, then: * If `metadata` is specified and is not `False`, then `printmetadata` defaults to `True`. * Else if `metadata` is `False`, then `printmetadata` defaults to `False`. * Else `metadata` is not specified, and `printmetadata` defaults to `False`. See the :func:`tabular.io.loadSV` for more information about `metadatadict`. **stringifier** : callable Callable taking 1-d numpy array and returning Python list of strings of same length, or dictionary or tuple of such callables. If specified, the callable will be applied to each column, and the resulting list of strings will be written to the file. If specified as a list or dictionary of callables, the functions will be applied to correponding columns. The default used if **stringifier** is not specified, is `tb.utils.DEFAULT_STRINGIFIER`, which merely passes through string-type columns, and converts numerical-type columns directly to corresponding strings with NaNs replaced with blank values. The main purpose of specifying a non-default value is to encode numerical values in various string encodings that might be used required for other applications like databases. NOTE: In certain special circumstances (e.g. when the lineterminator or delimiter character appears in a field of the data), the Python CSV writer is used to write out data. 
To allow for control of the operation of the writer in these circumstances, the following other parameters replicating the interface of the CSV module are also valid, and values will be passed through: **doublequote**, **escapechar**, **quoting**, **quotechar**, and **skipinitialspace**. (See Python CSV module documentation for more information.) **See Also:** :func:`tabular.io.loadSV` """ if metadata is None: metakeys = ['names'] if printmetadict is None: printmetadict = False if verbosity > 8: print '''Defaulting to not printing out the metametadata dictionary line.''' if comments is None: comments = '' if verbosity > 8: print 'Defaulting empty comment string.' if verbosity > 7: print 'Defaulting to writing out names metadata.' elif metadata is True: metakeys = defaultmetadatakeys(X) if printmetadict is None: printmetadict = True if verbosity > 8: print '''Defaulting to printing out the metametadata dictionary line.''' if comments is None: comments = '' if verbosity > 8: print 'Defaulting empty comment string.' if verbosity >= 5: print 'Writing out all present metadata keys ... ' elif metadata is False: metakeys = [] printmetadict = False comments = '' if verbosity >= 5: print 'Writing out no metadata at all.' else: metakeys = metadata if printmetadict is None: if metakeys == []: printmetadict = False else: printmetadict = True if comments is None: comments = '' if verbosity >= 5: print '''Using user-specified metadata keys to contol metadata writing.''' assert lineterminator in ['\r','\n','\r\n'], '''lineterminator must be one of ''' + repr( ['\r','\n','\r\n']) dialect = getdialect(fname, dialect, delimiter, lineterminator, doublequote, escapechar, quoting, quotechar, skipinitialspace) delimiter = dialect.delimiter if 6 > verbosity > 2: print 'Using delimiter ', repr(delimiter) elif verbosity >= 6: print 'Using dialect with values:', repr(printdialect(dialect)) metadata = getstringmetadata(X,metakeys,dialect) metametadata = {} v = 1 for k in metakeys: if k in metadata.keys(): nl = len(metadata[k].split(lineterminator)) metametadata[k] = v if nl == 1 else (v, v + nl) v = v + nl F = open(fname,'wb') if printmetadict is True: line = "metametadata=" + repr(metametadata) F.write(comments + line + lineterminator) for k in metakeys: if k in metadata.keys(): for line in metadata[k].split(lineterminator): F.write(comments + line + lineterminator) Write(X, F, dialect, stringifier=stringifier) F.close()
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L705-L894
write csv
python
def getCSVReader(data, reader_type=csv.DictReader):
    """Take a Rave CSV output ending with a line with just EOF on it and
    return a DictReader"""
    f = StringIO(data[:-4])  # Remove \nEOF
    return reader_type(f)
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/local_cv.py#L51-L54
write csv
python
def _write(self, content, mode, encoding=None, linesep=False): """Handles file writes.""" makedirs(self.path) try: encoding = encoding or ENCODING if "b" not in mode: try: content = str(content) except: pass if linesep: content += os.linesep with codecs.open(self.path, mode, encoding=encoding) as fo: fo.write(content) return True except: return False
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/filesys.py#L147-L163
write csv
python
def to_csv(self, datadir=None, sep=None, cycles=False, raw=True, summary=True, shifted=False, method=None, shift=0.0, last_cycle=None): """Saves the data as .csv file(s). Args: datadir: folder where to save the data (uses current folder if not given). sep: the separator to use in the csv file (defaults to CellpyData.sep). cycles: (bool) export voltage-capacity curves if True. raw: (bool) export raw-data if True. summary: (bool) export summary if True. shifted (bool): export with cumulated shift. method (string): how the curves are given "back-and-forth" - standard back and forth; discharge (or charge) reversed from where charge (or discharge) ends. "forth" - discharge (or charge) continues along x-axis. "forth-and-forth" - discharge (or charge) also starts at 0 (or shift if not shift=0.0) shift: start-value for charge (or discharge) last_cycle: process only up to this cycle (if not None). Returns: Nothing """ if sep is None: sep = self.sep self.logger.debug("saving to csv") dataset_number = -1 for data in self.datasets: dataset_number += 1 if not self._is_not_empty_dataset(data): self.logger.info("to_csv -") self.logger.info("empty test [%i]" % dataset_number) self.logger.info("not saved!") else: if isinstance(data.loaded_from, (list, tuple)): txt = "merged file" txt += "using first file as basename" self.logger.debug(txt) no_merged_sets = len(data.loaded_from) no_merged_sets = "_merged_" + str(no_merged_sets).zfill(3) filename = data.loaded_from[0] else: filename = data.loaded_from no_merged_sets = "" firstname, extension = os.path.splitext(filename) firstname += no_merged_sets if datadir: firstname = os.path.join(datadir, os.path.basename(firstname)) if raw: outname_normal = firstname + "_normal.csv" self._export_normal(data, outname=outname_normal, sep=sep) if data.step_table_made is True: outname_steps = firstname + "_steps.csv" self._export_steptable(data, outname=outname_steps, sep=sep) else: self.logger.debug("step_table_made is not True") if summary: outname_stats = firstname + "_stats.csv" self._export_stats(data, outname=outname_stats, sep=sep) if cycles: outname_cycles = firstname + "_cycles.csv" self._export_cycles(outname=outname_cycles, dataset_number=dataset_number, sep=sep, shifted=shifted, method=method, shift=shift, last_cycle=last_cycle)
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L1932-L2010
write csv
python
def s3_write(self, log, remote_log_location, append=True): """ Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool """ if append and self.s3_log_exists(remote_log_location): old_log = self.s3_read(remote_log_location) log = '\n'.join([old_log, log]) if old_log else log try: self.hook.load_string( log, key=remote_log_location, replace=True, encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'), ) except Exception: self.log.exception('Could not write logs to %s', remote_log_location)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/s3_task_handler.py#L146-L170
write csv
python
def as_csv(self):
    """Return a CSV representation as a string"""
    from io import StringIO
    s = StringIO()
    w = csv.writer(s)
    for row in self.rows:
        w.writerow(row)
    return s.getvalue()
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L801-L811
write csv
python
def write_text(self, data, encoding=None, errors=None):
    """
    Open the file in text mode, write to it, and close the file.
    """
    if not isinstance(data, six.text_type):
        raise TypeError(
            'data must be %s, not %s' %
            (six.text_type.__class__.__name__, data.__class__.__name__))
    with self.open(mode='w', encoding=encoding, errors=errors) as f:
        return f.write(data)
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L1314-L1323
write csv
python
def write_to_file(data, path): """Export extracted fields to csv Appends .csv to path if missing and generates csv file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated csv file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_csv >>> to_csv.write_to_file(data, "/exported_csv/invoice.csv") >>> to_csv.write_to_file(data, "invoice.csv") """ if path.endswith('.csv'): filename = path else: filename = path + '.csv' if sys.version_info[0] < 3: openfile = open(filename, "wb") else: openfile = open(filename, "w", newline='') with openfile as csv_file: writer = csv.writer(csv_file, delimiter=',') for line in data: first_row = [] for k, v in line.items(): first_row.append(k) writer.writerow(first_row) for line in data: csv_items = [] for k, v in line.items(): # first_row.append(k) if k == 'date': v = v.strftime('%d/%m/%Y') csv_items.append(v) writer.writerow(csv_items)
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_csv.py#L5-L54
write csv
python
def writerow(self, row):
    """ Writes a row to the CSV file """
    self.writer.writerow(row)
    # Fetch UTF-8 output from the queue ...
    data = self.queue.getvalue()
    # write to the target stream
    self.stream.write(data)
    # empty queue
    self.queue.truncate(0)
    self.queue.seek(0)
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/utils.py#L175-L184
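The writerow above follows the classic "UnicodeWriter" recipe: each row is first rendered into an in-memory queue and then copied to the real stream. A condensed, self-contained sketch of that structure (class name and target stream are illustrative):
import csv
import io

class QueuedCSVWriter:
    """Render each row into an in-memory buffer, then flush it to the target stream."""

    def __init__(self, stream):
        self.queue = io.StringIO()
        self.writer = csv.writer(self.queue)
        self.stream = stream

    def writerow(self, row):
        self.writer.writerow(row)
        data = self.queue.getvalue()   # fetch the rendered CSV text
        self.stream.write(data)        # copy it to the real target
        self.queue.truncate(0)         # empty the buffer for the next row
        self.queue.seek(0)

target = io.StringIO()
QueuedCSVWriter(target).writerow(["a", "b", "c"])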
write csv
python
def reader(f):
    '''CSV Reader factory for CADA format'''
    return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L30-L32
write csv
python
def to_csv( self, hql, csv_filepath, schema='default', delimiter=',', lineterminator='\r\n', output_header=True, fetch_size=1000, hive_conf=None): """ Execute hql in target schema and write results to a csv file. :param hql: hql to be executed. :type hql: str or list :param csv_filepath: filepath of csv to write results into. :type csv_filepath: str :param schema: target schema, default to 'default'. :type schema: str :param delimiter: delimiter of the csv file, default to ','. :type delimiter: str :param lineterminator: lineterminator of the csv file. :type lineterminator: str :param output_header: header of the csv file, default to True. :type output_header: bool :param fetch_size: number of result rows to write into the csv file, default to 1000. :type fetch_size: int :param hive_conf: hive_conf to execute alone with the hql. :type hive_conf: dict """ results_iter = self._get_results(hql, schema, fetch_size=fetch_size, hive_conf=hive_conf) header = next(results_iter) message = None i = 0 with open(csv_filepath, 'wb') as f: writer = csv.writer(f, delimiter=delimiter, lineterminator=lineterminator, encoding='utf-8') try: if output_header: self.log.debug('Cursor description is %s', header) writer.writerow([c[0] for c in header]) for i, row in enumerate(results_iter, 1): writer.writerow(row) if i % fetch_size == 0: self.log.info("Written %s rows so far.", i) except ValueError as exception: message = str(exception) if message: # need to clean up the file first os.remove(csv_filepath) raise ValueError(message) self.log.info("Done. Loaded a total of %s rows.", i)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L858-L918
write csv
python
def write_document(doc, fnm):
    """Write a Text document to file.

    Parameters
    ----------
    doc: Text
        The document to save.
    fnm: str
        The filename to save the document
    """
    with codecs.open(fnm, 'wb', 'ascii') as f:
        f.write(json.dumps(doc, indent=2))
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/corpus.py#L80-L91