Dataset columns: query (string, lengths 9-60); language (string, 1 class); code (string, lengths 105-25.7k); url (string, lengths 91-217)
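Every record below pairs the query "how to read .csv file in an efficient way?" with a Python snippet and its source URL. For orientation, here is a minimal sketch, not drawn from any record below (the file name, column name, and chunk size are illustrative assumptions), of the most common memory-efficient pattern: streaming a large CSV in chunks with pandas.

import pandas as pd

def read_csv_in_chunks(path, chunksize=100_000):
    """Yield DataFrame chunks so a large file is never loaded fully into memory."""
    # pandas.read_csv with `chunksize` returns an iterator of DataFrames
    for chunk in pd.read_csv(path, chunksize=chunksize):
        yield chunk

# Example: aggregate one column across all chunks of a (hypothetical) large file
total = sum(chunk["value"].sum() for chunk in read_csv_in_chunks("big_file.csv"))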
how to read .csv file in an efficient way?
python
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}): """Read a CSV file Usage ----- >>> data = read_csv(filename, delimiter=delimiter, skip=skip, guess_type=guess_type, has_header=True, use_types={}) # Use specific types >>> types = {"sepal.length": int, "petal.width": float} >>> data = read_csv(filename, guess_type=guess_type, use_types=types) keywords :has_header: Determine whether the file has a header or not """ with open(filename, 'r') as f: # Skip the n first lines if has_header: header = f.readline().strip().split(delimiter) else: header = None for i in range(skip): f.readline() for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header): if use_types: yield apply_types(use_types, guess_type, line) elif guess_type: yield dmap(determine_type, line) else: yield line
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L55-L88
how to read .csv file in an efficient way?
python
def read_csv(filename, keys=None, convert_types=False, **kwargs):
    '''
    Read a CSV in canonical form: ::

        <agent_id, t_step, key, value, value_type>

    '''
    df = pd.read_csv(filename)
    if convert_types:
        df = convert_types_slow(df)
    if keys:
        df = df[df['key'].isin(keys)]
    df = process_one(df)
    return df
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/analysis.py#L42-L55
how to read .csv file in an efficient way?
python
def read_csv(
    filename: Union[PathLike, Iterator[str]],
    delimiter: Optional[str]=',',
    first_column_names: Optional[bool]=None,
    dtype: str='float32',
) -> AnnData:
    """Read ``.csv`` file.

    Same as :func:`~anndata.read_text` but with default delimiter ``','``.

    Parameters
    ----------
    filename
        Data file.
    delimiter
        Delimiter that separates data within text file. If ``None``, will split at
        arbitrary number of white spaces, which is different from enforcing
        splitting at single white space ``' '``.
    first_column_names
        Assume the first column stores row names.
    dtype
        Numpy data type.
    """
    return read_text(filename, delimiter, first_column_names, dtype)
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/read.py#L16-L39
how to read .csv file in an efficient way?
python
def read_csv(csv_file, ext='.csv', format=None, delete_empty_keys=False, fieldnames=[], rowlimit=100000000, numbers=False, normalize_names=True, unique_names=True, verbosity=0): r""" Read a csv file from a path or file pointer, returning a dict of lists, or list of lists (according to `format`) filename: a directory or list of file paths numbers: whether to attempt to convert strings in csv to numbers TODO: merge with `nlp.util.make_dataframe` function Handles unquoted and quoted strings, quoted commas, quoted newlines (EOLs), complex numbers, times, dates, datetimes >>> read_csv('"name\r\n",rank,"serial\nnumber",date <BR />\t\n"McCain, John","1","123456789",9/11/2001\n' + ... 'Bob,big cheese,1-23,1/1/2001 12:00 GMT', format='header+values list', numbers=True) [['name', 'rank', 'serial\nnumber', 'date'], ['McCain, John', 1.0, 123456789.0, '9/11/2001'], ['Bob', 'big cheese', '1-23', '1/1/2001 12:00 GMT']] """ if not csv_file: return if isinstance(csv_file, basestring): # truncate `csv_file` in case it is a string buffer containing GBs of data path = csv_file[:1025] try: # see http://stackoverflow.com/a/4169762/623735 before trying 'rU' fpin = open(path, 'rUb') # U = universal EOL reader, b = binary except (IOError, FileNotFoundError): # truncate path more, in case path is used later as a file description: path = csv_file[:128] fpin = StringIO(str(csv_file)) else: fpin = csv_file try: path = csv_file.name except (IndexError, ValueError, AttributeError, TypeError): path = 'unknown file buffer path' format = format or 'h' format = format[0].lower() # if fieldnames not specified then assume that first row of csv contains headings csvr = csv.reader(fpin, dialect=csv.excel) if not fieldnames: while not fieldnames or not any(fieldnames): fieldnames = strip_br([str(s).strip() for s in next(csvr)]) if verbosity > 0: logger.info('Column Labels: ' + repr(fieldnames)) if unique_names: norm_names = OrderedDict([(fldnm, fldnm) for fldnm in fieldnames]) else: norm_names = OrderedDict([(num, fldnm) for num, fldnm in enumerate(fieldnames)]) if normalize_names: norm_names = OrderedDict([(num, make_name(fldnm, **make_name.DJANGO_FIELD)) for num, fldnm in enumerate(fieldnames)]) # required for django-formatted json files model_name = make_name(path, **make_name.DJANGO_MODEL) if format in 'c': # columnwise dict of lists recs = OrderedDict((norm_name, []) for norm_name in list(norm_names.values())) elif format in 'vh': recs = [fieldnames] else: recs = [] if verbosity > 0: logger.info('Field Names: ' + repr(norm_names if normalize_names else fieldnames)) rownum = 0 eof = False pbar = None start_seek_pos = fpin.tell() or 0 if verbosity > 1: print('Starting at byte {} in file buffer.'.format(start_seek_pos)) fpin.seek(0, os.SEEK_END) file_len = fpin.tell() - start_seek_pos # os.fstat(fpin.fileno()).st_size fpin.seek(start_seek_pos) if verbosity > 1: print(('There appear to be {} bytes remaining in the file buffer.' 
+ 'Resetting (seek) to starting position in file.').format(file_len)) # if verbosity > 0: # pbar = progressbar.ProgressBar(maxval=file_len) # pbar.start() while csvr and rownum < rowlimit and not eof: if pbar: pbar.update(fpin.tell() - start_seek_pos) rownum += 1 row = [] row_dict = OrderedDict() # skip rows with all empty strings as values, while not row or not any(len(x) for x in row): try: row = next(csvr) if verbosity > 1: logger.info(' row content: ' + repr(row)) except StopIteration: eof = True break if eof: break if len(row) and isinstance(row[-1], basestring) and len(row[-1]): row = strip_br(row) if numbers: # try to convert the type to a numerical scalar type (int, float etc) row = [tryconvert(v, desired_types=NUMBERS_AND_DATETIMES, empty=None, default=v) for v in row] if row: N = min(max(len(row), 0), len(norm_names)) row_dict = OrderedDict( ((field_name, field_value) for field_name, field_value in zip( list(list(norm_names.values()) if unique_names else norm_names)[:N], row[:N]) if (str(field_name).strip() or delete_empty_keys is False)) ) if format in 'dj': # django json format recs += [{"pk": rownum, "model": model_name, "fields": row_dict}] elif format in 'vhl': # list of lists of values, with header row (list of str) recs += [[value for field_name, value in viewitems(row_dict) if (field_name.strip() or delete_empty_keys is False)]] elif format in 'c': # columnwise dict of lists for field_name in row_dict: recs[field_name] += [row_dict[field_name]] if verbosity > 2: print([recs[field_name][-1] for field_name in row_dict]) else: recs += [row_dict] if verbosity > 2 and format not in 'c': print(recs[-1]) if file_len > fpin.tell(): logger.info("Only %d of %d bytes were read and processed." % (fpin.tell(), file_len)) if pbar: pbar.finish() fpin.close() if not unique_names: return recs, norm_names return recs
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1451-L1583
how to read .csv file in an efficient way?
python
def _readcsv(self, path_to_csv):
    """reads a csv column"""
    return np.genfromtxt(path_to_csv, dtype=None, delimiter=',', names=True)
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/enrichment/_geoenrichment.py#L56-L61
how to read .csv file in an efficient way?
python
def read_csv(filename, delimiter=CSV_DELIMITER):
    """ Read data from CSV file

    :param filename: name of CSV file to be read
    :type filename: str
    :param delimiter: type of CSV delimiter. Default is ``;``
    :type delimiter: str
    :return: data stored in CSV file as list
    """
    with open(filename, 'r') as file:
        return list(csv.reader(file, delimiter=delimiter))
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/io_utils.py#L113-L123
how to read .csv file in an efficient way?
python
def read(self):
    """Reads CSV file and returns list of contents"""
    # Validate file path
    assert os.path.isfile(self.file_path), 'No such file exists: ' + str(self.file_path)

    # Open CSV file and read contents
    with open(self.file_path, 'r') as f:
        reader = csv_builtin.reader(f)
        loaded_data = list(reader)

    # Force digits to become integers
    return juggle_types(loaded_data)
https://github.com/mrstephenneal/databasetools/blob/e515c568e80fe990c192eb7df0094ad2f474ee67/databasetools/csv.py#L35-L46
how to read .csv file in an efficient way?
python
def read_csv_file(self, file_name):
    """
    Parses a CSV file into a list.

    :param file_name: name of the CSV file
    :return: a list with the file's contents
    """
    result = []

    with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as csvfile:
        headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|')

        for type_row in headers_reader:
            for t in type_row:
                result.append(t)

    return result
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/data_cwr/accessor.py#L39-L53
how to read .csv file in an efficient way?
python
def read_csv(self, csv_location, csv_configs=None):
    # type: (str, Dict[str, int]) -> List[List[str]]
    """ Read the csv file """
    csv_configs = self._generate_configs_from_default(csv_configs)
    with open(csv_location, 'r') as csv_file:
        csv_reader = csv.reader(csv_file)
        self.csv_data = list(csv_reader)
    self.csv_data = self.csv_data[csv_configs['HEADER_COLUMNS_TO_SKIP']:]
    return self.csv_data
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L46-L54
how to read .csv file in an efficient way?
python
def read_csv(fpath):
    """ reads csv in unicode """
    import csv
    import utool as ut
    #csvfile = open(fpath, 'rb')
    with open(fpath, 'rb') as csvfile:
        row_iter = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))
        row_list = [ut.lmap(ut.ensure_unicode, row) for row in row_iter]
    return row_list
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_csv.py#L151-L159
how to read .csv file in an efficient way?
python
def read(filepath, **kwargs): """ Read a file. Supported formats: * CSV * JSON, JSONL * pickle Parameters ---------- filepath : str Path to the file that should be read. This methods action depends mainly on the file extension. kwargs : dict Any keywords for the specific file format. For CSV, this is 'delimiter', 'quotechar', 'skiprows', 'format' Returns ------- data : str or bytes """ if filepath.lower().endswith('.csv'): return _read_csv(filepath, kwargs) elif filepath.lower().endswith('.json'): with open(filepath) as data_file: data = json.load(data_file, **kwargs) return data elif filepath.lower().endswith('.jsonl'): return _read_jsonl(filepath, kwargs) elif filepath.lower().endswith('.pickle'): with open(filepath, 'rb') as handle: data = pickle.load(handle) return data elif (filepath.lower().endswith('.yml') or filepath.lower().endswith('.yaml')): raise NotImplementedError('YAML is not supported, because you need ' 'PyYAML in Python3. ' 'See ' 'https://stackoverflow.com/a/42054860/562769' ' as a guide how to use it.') elif (filepath.lower().endswith('.h5') or filepath.lower().endswith('.hdf5')): raise NotImplementedError('HDF5 is not supported. See ' 'https://stackoverflow.com/a/41586571/562769' ' as a guide how to use it.') else: raise NotImplementedError('File \'{}\' is not known.'.format(filepath))
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/io.py#L29-L77
how to read .csv file in an efficient way?
python
def read_csv(fname, sep=','):
    """
    :param fname: a CSV file with an header and float fields
    :param sep: separato (default the comma)
    :return: a structured array of floats
    """
    with open(fname, encoding='utf-8-sig') as f:
        header = next(f).strip().split(sep)
        dt = numpy.dtype([(h, numpy.bool if h == 'vs30measured' else float)
                          for h in header])
        return numpy.loadtxt(f, dt, delimiter=sep)
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L258-L268
how to read .csv file in an efficient way?
python
def read_csv(path, delim=','): """This function reads comma-separated values from a file. Parameter <path> is either a pathname or a file-like object that supports the |readline()| method. Empty lines and lines beginning with "#" are ignored. Parameter <delim> specifies how a line is separated into values. If it does not contain the letter "%", then <delim> marks the end of a value. Otherwise, this function acts like scanf in C: chart_data.read_csv('file', '%d,%s:%d') Paramter <delim> currently supports only three conversion format specifiers: "d"(int), "f"(double), and "s"(string).""" fd = _try_open_file(path, 'r', 'The first argument must be a pathname or an object that supports readline() method') data = [] line = fd.readline() while line != "": if line[0] != '#' and not empty_line_p(line): data.append(parse_line(line, delim)) line = fd.readline() _try_close_file(fd, path) return data
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/chart_data.py#L221-L249
how to read .csv file in an efficient way?
python
def _read_csv_lines(path):
    """
    Opens CSV file `path` and returns list of rows.
    Pass output of this function to `csv.DictReader` for reading data.
    """
    csv_file = open(path, 'r')
    csv_lines_raw = csv_file.readlines()
    csv_lines_clean = [line for line in csv_lines_raw if len(line.strip()) > 0]
    return csv_lines_clean
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L875-L883
how to read .csv file in an efficient way?
python
def read_csv_from_file(filename): """ Opens the target CSV file and creates a dictionary with one list for each CSV column. :param str filename: :return list of lists: column values """ logger_csvs.info("enter read_csv_from_file") d = {} l = [] try: logger_csvs.info("open file: {}".format(filename)) with open(filename, 'r') as f: r = csv.reader(f, delimiter=',') # Create a dict with X lists corresponding to X columns for idx, col in enumerate(next(r)): d[idx] = [] d = cast_values_csvs(d, idx, col) # Start iter through CSV data for row in r: for idx, col in enumerate(row): # Append the cell to the correct column list d = cast_values_csvs(d, idx, col) # Make a list of lists out of the dictionary instead for idx, col in d.items(): l.append(col) except FileNotFoundError as e: print('CSV FileNotFound: ' + filename) logger_csvs.warn("read_csv_to_columns: FileNotFound: {}, {}".format(filename, e)) logger_csvs.info("exit read_csv_from_file") return l
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L208-L241
how to read .csv file in an efficient way?
python
def read_csv(path, fieldnames=None, sniff=True, encoding='utf-8', *args, **kwargs):
    ''' Read CSV rows as table from a file.
        By default, csv.reader() will be used any output will be a list of lists.
        If fieldnames is provided, DictReader will be used and output will be list of OrderedDict instead.
        CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    return list(r for r in read_csv_iter(path, fieldnames=fieldnames, sniff=sniff, encoding=encoding, *args, **kwargs))
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L164-L170
how to read .csv file in an efficient way?
python
def readcsv(fn): """ Wrapper to read arbitrary csv, check for header Needs some work to be more robust, quickly added for demcoreg sampling """ import csv #Check first line for header with open(fn, 'r') as f: reader = csv.DictReader(f) hdr = reader.fieldnames #Assume there is a header on first line, check skiprows = 1 if np.all(f.isdigit() for f in hdr): hdr = None skiprows = 0 #Check header for lat/lon/z or x/y/z tags #Should probably do genfromtxt here if header exists and dtype of cols is variable pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None) return pts
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L604-L626
how to read .csv file in an efficient way?
python
def _read_input_csv(in_file):
    """Parse useful details from SampleSheet CSV file.
    """
    with io.open(in_file, newline=None) as in_handle:
        reader = csv.reader(in_handle)
        next(reader)  # header
        for line in reader:
            if line:  # empty lines
                (fc_id, lane, sample_id, genome, barcode) = line[:5]
                yield fc_id, lane, sample_id, genome, barcode
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L81-L90
how to read .csv file in an efficient way?
python
def read_csv(filepath):
    """
    Read a CSV into a list of dictionarys. The first line of the CSV determines
    the keys of the dictionary.

    Parameters
    ----------
    filepath : string

    Returns
    -------
    list of dictionaries
    """
    symbols = []
    with open(filepath, 'rb') as csvfile:
        spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
        for row in spamreader:
            symbols.append(row)
    return symbols
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/filter_dataset.py#L179-L197
how to read .csv file in an efficient way?
python
def read_csv_iter(path, fieldnames=None, sniff=True, mode='rt', encoding='utf-8', *args, **kwargs):
    ''' Iterate through CSV rows in a file.
        By default, csv.reader() will be used any output will be a list of lists.
        If fieldnames is provided, DictReader will be used and output will be list of OrderedDict instead.
        CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    with open(path, mode=mode, encoding=encoding) as infile:
        for row in iter_csv_stream(infile, fieldnames=fieldnames, sniff=sniff, *args, **kwargs):
            yield row
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L149-L157
how to read .csv file in an efficient way?
python
def read_creds_from_csv(filename): """ Read credentials from a CSV file :param filename: :return: """ key_id = None secret = None mfa_serial = None secret_next = False with open(filename, 'rt') as csvfile: for i, line in enumerate(csvfile): values = line.split(',') for v in values: if v.startswith('AKIA'): key_id = v.strip() secret_next = True elif secret_next: secret = v.strip() secret_next = False elif re_mfa_serial_format.match(v): mfa_serial = v.strip() return key_id, secret, mfa_serial
https://github.com/nccgroup/opinel/blob/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/opinel/utils/credentials.py#L230-L253
how to read .csv file in an efficient way?
python
def read_file(fname):
    """ read a CSV file (ref_classes.csv) and return the list of names """
    print("NO - dont use this function read_file(fname):")
    exit(1)
    lst = []
    with open(fname, 'r') as f:
        #hdr = f.readline()
        for line in f:
            lst.append(line.strip('\n'))
    return lst
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/character.py#L254-L266
how to read .csv file in an efficient way?
python
def read_csv(csv_name, usecols=None):
    """Returns a DataFrame from a .csv file stored in /data/raw/"""
    csv_path = os.path.join(DATA_FOLDER, csv_name)
    csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols, encoding="utf-8")
    return csv
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/utils/read_csv.py#L10-L15
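A hedged follow-up to the snippet above: limiting `usecols` and fixing dtypes are the usual ways to cut pandas' memory use further; the path, column names, and dtypes here are assumptions for illustration only.

import pandas as pd

df = pd.read_csv(
    "data/raw/projects.csv",                      # hypothetical path
    usecols=["id", "amount"],                     # parse only the columns that are needed
    dtype={"id": "int64", "amount": "float64"},   # skip per-column dtype inference
)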
how to read .csv file in an efficient way?
python
def read_csv(csvfile, options): """ Read csv and return molList, a list of mol objects """ # open file or exit name, ext = os.path.splitext(csvfile) try: if ext == '.gz': f = gzip.open(csvfile, 'rb') else: f = open(csvfile, 'rU') except IOError: print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csvfile))) sys.exit(1) # read file csv_reader = csv.reader(f) molList = [] linenumber = 1 for line in csv_reader: # get column labels from the first line if linenumber == 1: prop_indices = read_header(line, options) # otherwise read line & append to MolList else: mol = Molecule() mol = read_line(line, options, prop_indices, mol) # if the line's junk, skip it if mol == 1: print(" skipping molecule 'm'\n".format(m=(linenumber - 1))) else: molList.append(mol) linenumber += 1 # return molList return molList
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L190-L230
how to read .csv file in an efficient way?
python
def to_csv(self, path, mode=WRITE_MODE, dialect='excel', compression=None, newline='', **fmtparams): """ Saves the sequence to a csv file. Each element should be an iterable which will be expanded to the elements of each row. :param path: path to write file :param mode: file open mode :param dialect: passed to csv.writer :param fmtparams: passed to csv.writer """ if 'b' in mode: newline = None with universal_write_open(path, mode=mode, compression=compression, newline=newline) as output: csv_writer = csv.writer(output, dialect=dialect, **fmtparams) for row in self: csv_writer.writerow([six.u(str(element)) for element in row])
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/pipeline.py#L1525-L1544
how to read .csv file in an efficient way?
python
def read_csv(fname): """ Read a csv file into a DataAccessObject :param fname: filename """ values = defaultdict(list) with open(fname) as f: reader = csv.DictReader(f) for row in reader: for (k,v) in row.items(): values[k].append(v) npvalues = {k: np.array(values[k]) for k in values.keys()} for k in npvalues.keys(): for datatype in [np.int, np.float]: try: npvalues[k][:1].astype(datatype) npvalues[k] = npvalues[k].astype(datatype) break except: pass dao = DataAccessObject(npvalues) return dao
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/utils.py#L141-L163
how to read .csv file in an efficient way?
python
def read(self):
    """Reads data from the CSV file."""
    companies = []
    with open(self.file) as f:
        reader = unicodecsv.reader(f)
        for line in reader:
            if len(line) >= 1:
                cnpj = self.format(line[0])
                if self.valid(cnpj):
                    companies.append(cnpj)
    return companies
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/get.py#L50-L60
how to read .csv file in an efficient way?
python
def read_csv(csv_file, options, ensemble_list=None): """ Read csv and return molList, otherwise print error and exit. """ name, ext = os.path.splitext(csv_file) try: if ext == '.gz': f = gzip.open(csv_file, 'rb') else: f = open(csv_file, 'rU') except IOError: print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csv_file))) sys.exit(1) csv_reader = csv.reader(f) molList = [] line_number = 1 for line in csv_reader: if line_number == 1: if ensemble_list: prop_indices = read_header(line, options, ensemble_list) else: prop_indices = read_header(line, options) else: mol = Molecule() if ensemble_list: mol = read_line(line, options, prop_indices, mol, ensemble_list) else: mol = read_line(line, options, prop_indices, mol) if mol == 1: print(" skipping molecule {m}\n".format(m=(line_number - 1))) else: molList.append(mol) line_number += 1 return molList
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/csv_interface.py#L11-L49
how to read .csv file in an efficient way?
python
def to_csv(self, fileobj=sys.stdout):
    """Write data on file fileobj using CSV format."""
    openclose = is_string(fileobj)

    if openclose:
        fileobj = open(fileobj, "w")

    for idx, section in enumerate(self.sections):
        fileobj.write(section.to_csvline(with_header=(idx == 0)))
    fileobj.flush()

    if openclose:
        fileobj.close()
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abitimer.py#L644-L656
how to read .csv file in an efficient way?
python
def read_csv(csv_name): """ Read data from a csv file into a dictionary. :param str csv_name: path to a csv file. :return dict: a dictionary represents the data in file. """ data = {} if not isinstance(csv_name, (str, unicode)): raise exceptions.InvalidDataFormat('luminol.utils: csv_name has to be a string!') with open(csv_name, 'r') as csv_data: reader = csv.reader(csv_data, delimiter=',', quotechar='|') for row in reader: try: key = to_epoch(row[0]) value = float(row[1]) data[key] = value except ValueError: pass return data
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/lib/luminol/src/luminol/utils.py#L40-L58
how to read .csv file in an efficient way?
python
def read_csv(user_id, records_path, antennas_path=None, attributes_path=None, recharges_path=None, network=False, duration_format='seconds', describe=True, warnings=True, errors=False, drop_duplicates=False): """ Load user records from a CSV file. Parameters ---------- user_id : str ID of the user (filename) records_path : str Path of the directory all the user files. antennas_path : str, optional Path of the CSV file containing (place_id, latitude, longitude) values. This allows antennas to be mapped to their locations. recharges_path : str, optional Path of the directory containing recharges files (``datetime, amount, balance, retailer_id`` CSV file). antennas_path : str, optional Path of the CSV file containing (place_id, latitude, longitude) values. This allows antennas to be mapped to their locations. network : bool, optional If network is True, bandicoot loads the network of the user's correspondants from the same path. Defaults to False. duration_format : str, default is 'seconds' Allows reading records with call duration specified in other formats than seconds. Options are 'seconds' or any format such as '%H:%M:%S', '%M%S', etc. describe : boolean If describe is True, it will print a description of the loaded user to the standard output. errors : boolean If errors is True, returns a tuple (user, errors), where user is the user object and errors are the records which could not be loaded. drop_duplicates : boolean If drop_duplicates, remove "duplicated records" (same correspondants, direction, date and time). Not activated by default. Examples -------- >>> user = bandicoot.read_csv('sample_records', '.') >>> print len(user.records) 10 >>> user = bandicoot.read_csv('sample_records', 'samples', 'sample_places.csv') >>> print len(user.antennas) 5 >>> user = bandicoot.read_csv('sample_records', '.', None, 'sample_attributes.csv') >>> print user.attributes['age'] 25 Notes ----- - The csv files can be single, or double quoted if needed. - Empty cells are filled with ``None``. For example, if the column ``call_duration`` is empty for one record, its value will be ``None``. Other values such as ``"N/A"``, ``"None"``, ``"null"`` will be considered as a text. """ antennas = None if antennas_path is not None: try: with open(antennas_path, 'r') as csv_file: reader = csv.DictReader(csv_file) antennas = dict((d['antenna_id'], (float(d['latitude']), float(d['longitude']))) for d in reader) except IOError: pass user_records = os.path.join(records_path, user_id + '.csv') with open(user_records, 'r') as csv_file: reader = csv.DictReader(csv_file) records = [_parse_record(r, duration_format) for r in reader] attributes = None if attributes_path is not None: user_attributes = os.path.join(attributes_path, user_id + '.csv') attributes = _load_attributes(user_attributes) recharges = None if recharges_path is not None: user_recharges = os.path.join(recharges_path, user_id + '.csv') recharges = _load_recharges(user_recharges) user, bad_records = load(user_id, records, antennas, attributes, recharges, antennas_path, attributes_path, recharges_path, describe=False, warnings=warnings, drop_duplicates=drop_duplicates) # Loads the network if network is True: user.network = _read_network(user, records_path, attributes_path, read_csv, antennas_path, warnings, drop_duplicates=drop_duplicates) user.recompute_missing_neighbors() if describe: user.describe() if errors: return user, bad_records return user
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L488-L604
how to read .csv file in an efficient way?
python
def read_csv(filename):
    """Reads a CSV file containing a tabular description of a transition function,
       as found in Sipser. Major difference: instead of multiple header rows,
       only a single header row whose entries might be tuples.
    """
    with open(filename) as file:
        table = list(csv.reader(file))
    m = from_table(table)
    return m
https://github.com/ND-CSE-30151/tock/blob/b8d21901aaf0e6ac913c2afa855f5b5a882a16c6/tock/tables.py#L126-L135
how to read .csv file in an efficient way?
python
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs): # type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]] """Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form """ stream = Stream(filepath, headers=headers, **kwargs) stream.open() result = stream.read(keyed=dict_form) stream.close() return result
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L339-L360
how to read .csv file in an efficient way?
python
def print_csv(headers, data_list, parse_row_fn):
    """Takes headers, data, and a row parsing function, and prints data
    to the console in a csv format.
    """
    try:
        writer = csv.writer(sys.stdout)
        writer.writerow(headers)
        for data in data_list:
            writer.writerow(parse_row_fn(data))
    except csv.Error as e:
        raise CliException('Error writing CSV: {}'.format(e))
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/format_utils.py#L91-L101
how to read .csv file in an efficient way?
python
def format_to_csv(filename, skiprows=0, delimiter=""): """Convert a file to a .csv file""" if not delimiter: delimiter = "\t" input_file = open(filename, "r") if skiprows: [input_file.readline() for _ in range(skiprows)] new_filename = os.path.splitext(filename)[0] + ".csv" output_file = open(new_filename, "w") header = input_file.readline().split() reader = csv.DictReader(input_file, fieldnames=header, delimiter=delimiter) writer = csv.DictWriter(output_file, fieldnames=header, delimiter=",") # Write header writer.writerow(dict((x, x) for x in header)) # Write rows for line in reader: if None in line: del line[None] writer.writerow(line) input_file.close() output_file.close() print "Saved %s." % new_filename
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L155-L182
how to read .csv file in an efficient way?
python
def read_file(self): '''if this is stored in a file, read it into self.column''' column_selector = r'(.*)\[(\d+)\]$' if self.column_file: column = None m = re.match(column_selector,self.column_file) file = self.column_file if m: file = m.group(1) column = int(m.group(2)) with open(file) as f: lines = f.read().split('\n') if column!=None: lines = [x.split()[column] for x in lines] self.column = [nl.numberize(x) for x in lines] self.column_file = None if self.times_file: with open(self.times_file) as f: self.times = [[nl.numberize(x) for x in y.split()] for y in f.read().split('\n')] self.times_file = None
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/decon.py#L312-L331
how to read .csv file in an efficient way?
python
def _read(**kwargs):
    """Read csv file from local disk.
    Args:
        filepath_or_buffer:
            The filepath of the csv file.
            We only support local files for now.
        kwargs: Keyword arguments in pandas.read_csv
    """
    pd_obj = BaseFactory.read_csv(**kwargs)
    # This happens when `read_csv` returns a TextFileReader object for iterating through
    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
        reader = pd_obj.read
        pd_obj.read = lambda *args, **kwargs: DataFrame(
            query_compiler=reader(*args, **kwargs)
        )
        return pd_obj
    return DataFrame(query_compiler=pd_obj)
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/io.py#L104-L120
how to read .csv file in an efficient way?
python
def get_csv(filename): """ Return a csv representation of the exif get a filename and returns a unicode string with a CSV format Arguments: filename {string} -- your filename Returns: [unicode] -- unicode string """ check_if_this_file_exist(filename) #Process this function filename = os.path.abspath(filename) s = command_line(['exiftool', '-G', '-csv', '-sort', filename]) if s: #convert bytes to string s = s.decode('utf-8') return s else: return 0
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L123-L144
how to read .csv file in an efficient way?
python
def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): """ Read CSV file. .. deprecated:: 0.21.0 Use :func:`pandas.read_csv` instead. It is preferable to use the more powerful :func:`pandas.read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a time Series. This method only differs from :func:`pandas.read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `header` is ``None`` instead of ``0`` (the first row is not used as the column names) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used to return a Series like ``from_csv``. Parameters ---------- path : str, file path, or file handle / StringIO sep : str, default ',' Field delimiter. parse_dates : bool, default True Parse dates. Different default from read_table. header : int, default None Row to use as header (skip prior rows). index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table. encoding : str, optional A string representing the encoding to use if the contents are non-ascii, for python versions prior to 3. infer_datetime_format : bool, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- Series See Also -------- read_csv """ # We're calling `DataFrame.from_csv` in the implementation, # which will propagate a warning regarding `from_csv` deprecation. from pandas.core.frame import DataFrame df = DataFrame.from_csv(path, header=header, index_col=index_col, sep=sep, parse_dates=parse_dates, encoding=encoding, infer_datetime_format=infer_datetime_format) result = df.iloc[:, 0] if header is None: result.index.name = result.name = None return result
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L4171-L4236
how to read .csv file in an efficient way?
python
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): """ Read CSV file. .. deprecated:: 0.21.0 Use :func:`read_csv` instead. It is preferable to use the more powerful :func:`read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. This method only differs from the preferred :func:`read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) So a ``pd.DataFrame.from_csv(path)`` can be replaced by ``pd.read_csv(path, index_col=0, parse_dates=True)``. Parameters ---------- path : string file path or file handle / StringIO header : int, default 0 Row to use as header (skip prior rows) sep : string, default ',' Field delimiter index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table parse_dates : boolean, default True Parse dates. Different default from read_table tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) infer_datetime_format : boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- DataFrame See Also -------- read_csv """ warnings.warn("from_csv is deprecated. Please use read_csv(...) " "instead. Note that some of the default arguments are " "different, so please refer to the documentation " "for from_csv when changing your function calls", FutureWarning, stacklevel=2) from pandas.io.parsers import read_csv return read_csv(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1813-L1877
how to read .csv file in an efficient way?
python
def write_csv(data, file_name, encoding='utf-8'): ''' Writes out to csv format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file. ''' name_extension = len(data) > 1 root, ext = os.path.splitext(file_name) for i, sheet in enumerate(data): fname = file_name if not name_extension else root+"_"+str(i)+ext with open(fname, 'wb') as date_file: csv_file = csv.writer(date_file, encoding=encoding) for line in sheet: csv_file.writerow(line)
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L363-L379
how to read .csv file in an efficient way?
python
def csv(file, *args, **kwargs):
    '''
    Write CSV file.

    Parameters
    ----------
    file : Path
    *args
        csv.DictWriter args (except the f arg)
    **kwargs
        csv.DictWriter args

    Examples
    --------
    with write.csv(file) as writer:
        writer.writerow((1,2,3))
    '''
    with file.open('w', newline='') as f:
        yield DictWriter(f, *args, **kwargs)
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/write.py#L29-L47
how to read .csv file in an efficient way?
python
def iter_csv_stream(input_stream, fieldnames=None, sniff=False, *args, **kwargs): ''' Read CSV content as a table (list of lists) from an input stream ''' if 'dialect' not in kwargs and sniff: kwargs['dialect'] = csv.Sniffer().sniff(input_stream.read(1024)) input_stream.seek(0) if 'quoting' in kwargs and kwargs['quoting'] is None: kwargs['quoting'] = csv.QUOTE_MINIMAL if fieldnames: # read csv using dictreader if isinstance(fieldnames, bool): reader = csv.DictReader(input_stream, *args, **kwargs) else: reader = csv.DictReader(input_stream, *args, fieldnames=fieldnames, **kwargs) for row in reader: yield row else: csvreader = csv.reader(input_stream, *args, **kwargs) for row in csvreader: yield row
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L124-L142
how to read .csv file in an efficient way?
python
def _read_csv(reader, header, encode_fn): """ Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries. """ for row in reader: if len(row) == 0: continue row = [encode_fn(cell) for cell in row] row_list = zip(header, row) row_dict = dict(row_list) if len(row_dict['text']) == 0: continue row_dict['text'] = unicodedata.normalize( 'NFKC', row_dict['text'].strip() ) if row_dict.get('title') == '': del row_dict['title'] if 'date' in row_dict: # We handle dates further in open_json_or_csv_somehow if row_dict['date'] == '': del row_dict['date'] if 'subset' in row_dict: subsets = [cell[1] for cell in row_list if cell[1] != '' and cell[0] == 'subset'] if subsets: row_dict['subsets'] = subsets if 'subset' in row_dict: del row_dict['subset'] yield row_dict
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L257-L286
how to read .csv file in an efficient way?
python
def _read_generic_csv(file): """ Read and parse generic CSV file. Notes ----- Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype'; values are comma separated; unreported genotypes are indicated by '--'; and one header row precedes data. For example: rsid,chromosome,position,genotype rs1,1,1,AA rs2,1,2,CC rs3,1,3,-- Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, skiprows=1, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object, "pos": np.int64}, ) return sort_snps(df), "generic"
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L390-L425
how to read .csv file in an efficient way?
python
def _write_csv(self, datasets, filename):
    """
    Write CSV
    :param datasets: Datasets
    :param filename: File Name
    """
    with open('/'.join([self.output, filename]), mode='w', encoding=self.encoding) as write_file:
        writer = csv.writer(write_file, delimiter=',')
        for i, row in enumerate(datasets):
            if i == 0:
                # header
                writer.writerow(list(row.keys()))
            writer.writerow(list(row.values()))
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/mlbam.py#L132-L144
how to read .csv file in an efficient way?
python
def _readFile(self, sldir, fileName, sep): ''' Private method that reads in the header and column data. ''' if sldir.endswith(os.sep): fileName = str(sldir)+str(fileName) else: fileName = str(sldir)+os.sep+str(fileName) fileLines=[] #list of lines in the file header=[] #list of Header lines dataCols=[] #Dictionary of data column names data=[] #List of Data lists cols=[] #List of column names f=open(fileName,'r') fileLines=f.readlines() i=0 if self.datatype != 'trajectory': while i<len(fileLines): if fileLines[i].startswith(self.header_char): tmp=fileLines[i].lstrip(self.header_char) header.append(tmp.strip()) else: break i+=1 cols=fileLines[i].split(sep) tmp=[] tmp1=[] for j in range(len(cols)): tmp1=cols[j].strip() if tmp1 !='': tmp.append(tmp1) cols=tmp i+=1 else: header={} while fileLines[i].startswith('#') or '=' in fileLines[i]: if fileLines[i].startswith('#') and cols==[]: cols=fileLines[i].strip('#') cols=cols.strip() cols=cols.split() elif fileLines[i].startswith('#'): tmp1=fileLines[i].strip('#') tmp1=tmp1.strip() self.headerLines.append(tmp1) elif not fileLines[i].startswith('#'): tmp=fileLines[i].split('=') tmp[0]=tmp[0].strip() tmp[1]=tmp[1].strip() if header=={}: header={str(tmp[0]):str(tmp[1])} else: header[str(tmp[0])]=str(tmp[1]) i+=1 while i<len(fileLines): if fileLines[i].startswith('#'): i=i+1 else: tmp=fileLines[i].split() for j in range(len(tmp)): tmp[j]=tmp[j].strip() data.append(tmp) i+=1 tmp=[] tmp1=[] for j in range(len(data)): for k in range(len(data[j])): tmp1=data[j][k].strip() if tmp1 !='': tmp.append(tmp1) data[j]=tmp tmp=[] tmp=[] for j in range(len(cols)): for k in range(len(data)): try: a=float(data[k][j]) tmp.append(a) except ValueError: tmp.append(data[k][j]) #else: # tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j])) tmp=array(tmp) if j == 0: dataCols={cols[j]:tmp} else: dataCols[cols[j]]=tmp tmp=[] return header,dataCols
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ascii_table.py#L248-L346
how to read .csv file in an efficient way?
python
def getCSVReader(data, reader_type=csv.DictReader):
    """Take a Rave CSV output ending with a line with just EOF on it and return a DictReader"""
    f = StringIO(data[:-4])  # Remove \nEOF
    return reader_type(f)
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/local_cv.py#L51-L54
how to read .csv file in an efficient way?
python
def parse_csv_file(url, res):
    """Parse given URL and write res with {scheme -> description}"""
    response = requests.get(url, stream=True)
    reader = csv.reader(response.iter_lines())
    first_row = True
    for row in reader:
        if first_row:
            # skip first row
            first_row = False
        else:
            scheme, template, description, reference = row
            res[scheme] = description
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/scripts/update_iana_uri_schemes.py#L91-L102
how to read .csv file in an efficient way?
python
def write_csv_files(args, infilenames, outfilename): """Write csv file(s) to disk. Keyword arguments: args -- program arguments (dict) infilenames -- names of user-inputted and/or downloaded files (list) outfilename -- name of output text file (str) """ def csv_convert(line): """Strip punctuation and insert commas""" clean_line = [] for word in line.split(' '): clean_line.append(word.strip(string.punctuation)) return ', '.join(clean_line) if not outfilename.endswith('.csv'): outfilename = outfilename + '.csv' outfilename = overwrite_file_check(args, outfilename) all_text = [] # Text must be aggregated if writing to a single output file for i, infilename in enumerate(infilenames): parsed_text = get_parsed_text(args, infilename) if parsed_text: if args['multiple']: if not args['quiet']: print('Attempting to write to {0}.'.format(outfilename)) csv_text = [csv_convert(x) for x in parsed_text] print(csv_text) write_file(csv_text, outfilename) elif args['single']: all_text += parsed_text # Newline added between multiple files being aggregated if len(infilenames) > 1 and i < len(infilenames) - 1: all_text.append('\n') # Write all text to a single output file if args['single'] and all_text: if not args['quiet']: print('Attempting to write {0} page(s) to {1}.' .format(len(infilenames), outfilename)) csv_text = [csv_convert(x) for x in all_text] print(csv_text) write_file(csv_text, outfilename)
https://github.com/huntrar/scrape/blob/bf877f6da5df3ed0f2bea60a95acf7df63c88002/scrape/utils.py#L578-L622
how to read .csv file in an efficient way?
python
def prepare_csv_read(data, field_names, *args, **kwargs): """Prepare various input types for CSV parsing. Args: data (iter): Data to read field_names (tuple of str): Ordered names to assign to fields Returns: csv.DictReader: CSV reader suitable for parsing Raises: TypeError: Invalid value for data """ if hasattr(data, 'readlines') or isinstance(data, list): pass elif isinstance(data, basestring): data = open(data) else: raise TypeError('Unable to handle data of type %r' % type(data)) return csv.DictReader(data, field_names, *args, **kwargs)
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/utils.py#L184-L203
how to read .csv file in an efficient way?
python
def read_csv(text, sep="\t"):
    """Create a DataFrame from CSV text"""
    import pandas as pd  # no top level load to make a faster import of db
    return pd.read_csv(StringIO(text), sep="\t")
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L73-L76
how to read .csv file in an efficient way?
python
def _read_csv_impl(cls, url, delimiter=',', header=True, error_bad_lines=False, comment_char='', escape_char='\\', double_quote=True, quote_char='\"', skip_initial_space=True, column_type_hints=None, na_values=["NA"], line_terminator="\n", usecols=[], nrows=None, skiprows=0, verbose=True, store_errors=True, nrows_to_infer=100, true_values=[], false_values=[], _only_raw_string_substitutions=False, **kwargs): """ Constructs an SFrame from a CSV file or a path to multiple CSVs, and returns a pair containing the SFrame and optionally (if store_errors=True) a dict of filenames to SArrays indicating for each file, what are the incorrectly parsed lines encountered. Parameters ---------- store_errors : bool If true, the output errors dict will be filled. See `read_csv` for the rest of the parameters. """ # Pandas argument compatibility if "sep" in kwargs: delimiter = kwargs['sep'] del kwargs['sep'] if "quotechar" in kwargs: quote_char = kwargs['quotechar'] del kwargs['quotechar'] if "doublequote" in kwargs: double_quote = kwargs['doublequote'] del kwargs['doublequote'] if "comment" in kwargs: comment_char = kwargs['comment'] del kwargs['comment'] if comment_char is None: comment_char = '' if "lineterminator" in kwargs: line_terminator = kwargs['lineterminator'] del kwargs['lineterminator'] if len(kwargs) > 0: raise TypeError("Unexpected keyword arguments " + str(kwargs.keys())) parsing_config = dict() parsing_config["delimiter"] = delimiter parsing_config["use_header"] = header parsing_config["continue_on_failure"] = not error_bad_lines parsing_config["comment_char"] = comment_char parsing_config["escape_char"] = '\0' if escape_char is None else escape_char parsing_config["use_escape_char"] = escape_char is None parsing_config["double_quote"] = double_quote parsing_config["quote_char"] = quote_char parsing_config["skip_initial_space"] = skip_initial_space parsing_config["store_errors"] = store_errors parsing_config["line_terminator"] = line_terminator parsing_config["output_columns"] = usecols parsing_config["skip_rows"] =skiprows parsing_config["true_values"] = true_values parsing_config["false_values"] = false_values parsing_config["only_raw_string_substitutions"] = _only_raw_string_substitutions if type(na_values) is str: na_values = [na_values] if na_values is not None and len(na_values) > 0: parsing_config["na_values"] = na_values if nrows is not None: parsing_config["row_limit"] = nrows proxy = UnitySFrameProxy() internal_url = _make_internal_url(url) # Attempt to automatically detect the column types. Either produce a # list of types; otherwise default to all str types. column_type_inference_was_used = False if column_type_hints is None: try: # Get the first nrows_to_infer rows (using all the desired arguments). 
first_rows = SFrame.read_csv(url, nrows=nrows_to_infer, column_type_hints=type(None), header=header, delimiter=delimiter, comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, na_values=na_values, line_terminator=line_terminator, usecols=usecols, skiprows=skiprows, verbose=verbose, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions) column_type_hints = SFrame._infer_column_types_from_lines(first_rows) typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']' if verbose: print("------------------------------------------------------") print("Inferred types from first %d line(s) of file as " % nrows_to_infer) print("column_type_hints="+ typelist) print("If parsing fails due to incorrect types, you can correct") print("the inferred type list above and pass it to read_csv in") print( "the column_type_hints argument") print("------------------------------------------------------") column_type_inference_was_used = True except RuntimeError as e: if type(e) == RuntimeError and ("cancel" in str(e.args[0]) or "Cancel" in str(e.args[0])): raise e # If the above fails, default back to str for all columns. column_type_hints = str if verbose: print('Could not detect types. Using str for each column.') if type(column_type_hints) is type: type_hints = {'__all_columns__': column_type_hints} elif type(column_type_hints) is list: type_hints = dict(list(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))) elif type(column_type_hints) is dict: # we need to fill in a potentially incomplete dictionary try: # Get the first nrows_to_infer rows (using all the desired arguments). first_rows = SFrame.read_csv(url, nrows=nrows_to_infer, column_type_hints=type(None), header=header, delimiter=delimiter, comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, na_values=na_values, line_terminator=line_terminator, usecols=usecols, skiprows=skiprows, verbose=verbose, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions) inferred_types = SFrame._infer_column_types_from_lines(first_rows) # make a dict of column_name to type inferred_types = dict(list(zip(first_rows.column_names(), inferred_types))) # overwrite with the user's specified types for key in column_type_hints: inferred_types[key] = column_type_hints[key] column_type_hints = inferred_types except RuntimeError as e: if type(e) == RuntimeError and ("cancel" in str(e) or "Cancel" in str(e)): raise e # If the above fails, default back to str for unmatched columns if verbose: print('Could not detect types. Using str for all unspecified columns.') type_hints = column_type_hints else: raise TypeError("Invalid type for column_type_hints. 
Must be a dictionary, list or a single type.") try: if (not verbose): glconnect.get_server().set_log_progress(False) with cython_context(): errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints) except Exception as e: if type(e) == RuntimeError and "CSV parsing cancelled" in str(e.args[0]): raise e if column_type_inference_was_used: # try again if verbose: print("Unable to parse the file with automatic type inference.") print("Defaulting to column_type_hints=str") type_hints = {'__all_columns__': str} try: with cython_context(): errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints) except: glconnect.get_server().set_log_progress(True) raise else: glconnect.get_server().set_log_progress(True) raise glconnect.get_server().set_log_progress(True) return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.items() })
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L866-L1063
how to read .csv file in an efficient way?
python
def read_file(filename):
    """Read contents of the specified file.

    Parameters:
    -----------
    filename : str
        The name of the file to be read

    Returns:
        lines : list of str
        The contents of the file, split by line
    """
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
    return lines
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/parallel-tempering-2dpmf/parallel-tempering-2dpmf.py#L58-L76
how to read .csv file in an efficient way?
python
def read_sparse(cls, file_path: str): """Read a sparse representation from a tab-delimited text file. TODO: docstring""" with open(file_path) as fh: next(fh) # skip header line genes = next(fh)[1:-1].split('\t') cells = next(fh)[1:-1].split('\t') next(fh) m, n, nnz = [int(s) for s in next(fh)[:-1].split(' ')] t = pd.read_csv(file_path, sep=' ', skiprows=5, header=None, dtype={0: np.uint32, 1: np.uint32}) i = t[0].values - 1 j = t[1].values - 1 data = t[2].values assert data.size == nnz X = sparse.coo_matrix((data, (i,j)), shape=[m, n]).todense() return cls(X=X, genes=genes, cells=cells)
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/matrix.py#L515-L538
how to read .csv file in an efficient way?
python
def get_csv_reader(csvfile, dialect=csv.excel, encoding="utf-8", **kwds):
    """Returns csv reader."""
    try:
        # pylint: disable=pointless-statement
        unicode
        return UnicodeReader(csvfile, dialect=dialect, encoding=encoding, **kwds)
    except NameError:
        return csv.reader(csvfile, dialect=dialect, **kwds)
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/csv_unicode.py#L8-L15
how to read .csv file in an efficient way?
python
def _read_csv_with_offset_pyarrow_on_ray(
    fname, num_splits, start, end, kwargs, header
):  # pragma: no cover
    """Use a Ray task to read a chunk of a CSV into a pyarrow Table.
    Note: Ray functions are not detected by codecov (thus pragma: no cover)
    Args:
        fname: The filename of the file to open.
        num_splits: The number of splits (partitions) to separate the DataFrame into.
        start: The start byte offset.
        end: The end byte offset.
        kwargs: The kwargs for the pyarrow `read_csv` function.
        header: The header of the file.
    Returns:
        A list containing the split pyarrow Tables and the the number of
        rows of the tables as the last element. This is used to determine
        the total length of the DataFrame to build a default Index.
    """
    bio = open(fname, "rb")
    # The header line for the CSV file
    first_line = bio.readline()
    bio.seek(start)
    to_read = header + first_line + bio.read(end - start)
    bio.close()
    table = csv.read_csv(
        BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)
    )
    chunksize = get_default_chunksize(table.num_columns, num_splits)
    chunks = [
        pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])
        for i in range(num_splits)
    ]
    return chunks + [table.num_rows]
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/io.py#L23-L54
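The byte-offset trick above is what makes parallel CSV reading work: each worker opens the file on its own, seeks to its slice, and prepends the header line so the parser sees a complete CSV. A simplified, standard-library-only sketch of the same idea — it assumes the offsets fall on line boundaries and that header_bytes is the raw header line as bytes:

import csv
import io

def read_csv_slice(fname, start, end, header_bytes):
    """Parse the rows contained in the byte range [start, end) of a CSV file."""
    with open(fname, "rb") as f:
        f.seek(start)
        chunk = header_bytes + f.read(end - start)
    text = io.TextIOWrapper(io.BytesIO(chunk), encoding="utf-8", newline="")
    return list(csv.DictReader(text))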
how to read .csv file in an efficient way?
python
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- output_file: str or file object or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If `output_file` is None, the generated CSV output is returned as a string instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.) append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `output_file` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `output_file`. If `output_file` is given, writes the output to the file and returns `None`. If `output_file` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if fields_to_explode is not None: raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if output_file is None: file_or_string = io.StringIO() elif isinstance(output_file, str): mode = 'a' if append else 'w' file_or_string = open(output_file, mode) # ensure parent directory of output file exits dirname = os.path.dirname(os.path.abspath(output_file)) if not os.path.exists(dirname): logger.debug(f"Creating parent directory of output file '{output_file}'") os.makedirs(dirname) elif isinstance(output_file, io.IOBase): file_or_string = output_file else: raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})") retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: # TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up! # (Note that for regular file output we don't want to encode each line to a bytes # object because this seems to be ca. 2x slower). 
if isinstance(file_or_string, gzip.GzipFile): file_or_string.write(header_line.encode()) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line.encode()) else: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if output_file is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/item_list.py#L182-L280
how to read .csv file in an efficient way?
python
def _ReadCsvDict(self, file_name, cols, required, deprecated): """Reads lines from file_name, yielding a dict of unicode values.""" assert file_name.endswith(".txt") table_name = file_name[0:-4] contents = self._GetUtf8Contents(file_name) if not contents: return eol_checker = util.EndOfLineChecker(StringIO.StringIO(contents), file_name, self._problems) # The csv module doesn't provide a way to skip trailing space, but when I # checked 15/675 feeds had trailing space in a header row and 120 had spaces # after fields. Space after header fields can cause a serious parsing # problem, so warn. Space after body fields can cause a problem time, # integer and id fields; they will be validated at higher levels. reader = csv.reader(eol_checker, skipinitialspace=True) raw_header = next(reader) header_occurrences = util.defaultdict(lambda: 0) header = [] valid_columns = [] # Index into raw_header and raw_row for i, h in enumerate(raw_header): h_stripped = h.strip() if not h_stripped: self._problems.CsvSyntax( description="The header row should not contain any blank values. " "The corresponding column will be skipped for the " "entire file.", context=(file_name, 1, [''] * len(raw_header), raw_header), type=problems.TYPE_ERROR) continue elif h != h_stripped: self._problems.CsvSyntax( description="The header row should not contain any " "space characters.", context=(file_name, 1, [''] * len(raw_header), raw_header), type=problems.TYPE_WARNING) header.append(h_stripped) valid_columns.append(i) header_occurrences[h_stripped] += 1 for name, count in header_occurrences.items(): if count > 1: self._problems.DuplicateColumn( header=name, file_name=file_name, count=count) self._schedule._table_columns[table_name] = header # check for unrecognized columns, which are often misspellings header_context = (file_name, 1, [''] * len(header), header) valid_cols = cols + [deprecated_name for (deprecated_name, _) in deprecated] unknown_cols = set(header) - set(valid_cols) if len(unknown_cols) == len(header): self._problems.CsvSyntax( description="The header row did not contain any known column " "names. The file is most likely missing the header row " "or not in the expected CSV format.", context=(file_name, 1, [''] * len(raw_header), raw_header), type=problems.TYPE_ERROR) else: for col in unknown_cols: # this is provided in order to create a nice colored list of # columns in the validator output self._problems.UnrecognizedColumn(file_name, col, header_context) # check for missing required columns missing_cols = set(required) - set(header) for col in missing_cols: # this is provided in order to create a nice colored list of # columns in the validator output self._problems.MissingColumn(file_name, col, header_context) # check for deprecated columns for (deprecated_name, new_name) in deprecated: if deprecated_name in header: self._problems.DeprecatedColumn(file_name, deprecated_name, new_name, header_context) line_num = 1 # First line read by reader.next() above for raw_row in reader: line_num += 1 if len(raw_row) == 0: # skip extra empty lines in file continue if len(raw_row) > len(raw_header): self._problems.OtherProblem('Found too many cells (commas) in line ' '%d of file "%s". Every row in the file ' 'should have the same number of cells as ' 'the header (first line) does.' % (line_num, file_name), (file_name, line_num), type=problems.TYPE_WARNING) if len(raw_row) < len(raw_header): self._problems.OtherProblem('Found missing cells (commas) in line ' '%d of file "%s". 
Every row in the file ' 'should have the same number of cells as ' 'the header (first line) does.' % (line_num, file_name), (file_name, line_num), type=problems.TYPE_WARNING) # raw_row is a list of raw bytes which should be valid utf-8. Convert each # valid_columns of raw_row into Unicode. valid_values = [] unicode_error_columns = [] # index of valid_values elements with an error for i in valid_columns: try: valid_values.append(raw_row[i].decode('utf-8')) except UnicodeDecodeError: # Replace all invalid characters with REPLACEMENT CHARACTER (U+FFFD) valid_values.append(codecs.getdecoder("utf8") (raw_row[i], errors="replace")[0]) unicode_error_columns.append(len(valid_values) - 1) except IndexError: break # The error report may contain a dump of all values in valid_values so # problems can not be reported until after converting all of raw_row to # Unicode. for i in unicode_error_columns: self._problems.InvalidValue(header[i], valid_values[i], 'Unicode error', (file_name, line_num, valid_values, header)) # We strip ALL whitespace from around values. This matches the behavior # of both the Google and OneBusAway GTFS parser. valid_values = [value.strip() for value in valid_values] d = dict(zip(header, valid_values)) yield (d, line_num, header, valid_values)
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/loader.py#L147-L280
how to read .csv file in an efficient way?
python
def read_file(self, filename):
    """
    Read a text file and provide feedback to the user.

    :param filename: The pathname of the file to read (a string).
    :returns: The contents of the file (a string).
    """
    logger.info("Reading file: %s", format_path(filename))
    contents = self.context.read_file(filename)
    num_lines = len(contents.splitlines())
    logger.debug("Read %s from %s.",
                 pluralize(num_lines, 'line'), format_path(filename))
    return contents.rstrip()
https://github.com/xolox/python-update-dotdee/blob/04d5836f0d217e32778745b533beeb8159d80c32/update_dotdee/__init__.py#L184-L197
how to read .csv file in an efficient way?
python
def read_file(filename): """Read a file.""" logging.debug(_('Reading file: %s'), filename) try: with open(filename) as readable: return readable.read() except OSError: logging.error(_('Error reading file: %s'), filename) return ''
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L65-L73
how to read .csv file in an efficient way?
python
def load_csv_file(csv_file): """ load csv file and check file content format Args: csv_file (str): csv file path, csv file content is like below: Returns: list: list of parameters, each parameter is in dict format Examples: >>> cat csv_file username,password test1,111111 test2,222222 test3,333333 >>> load_csv_file(csv_file) [ {'username': 'test1', 'password': '111111'}, {'username': 'test2', 'password': '222222'}, {'username': 'test3', 'password': '333333'} ] """ if not os.path.isabs(csv_file): project_working_directory = tests_def_mapping["PWD"] or os.getcwd() # make compatible with Windows/Linux csv_file = os.path.join(project_working_directory, *csv_file.split("/")) if not os.path.isfile(csv_file): # file path not exist raise exceptions.CSVNotFound(csv_file) csv_content_list = [] with io.open(csv_file, encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile) for row in reader: csv_content_list.append(row) return csv_content_list
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/loader.py#L58-L98
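Accumulating every row in a list is fine for small parameter files; for large CSVs it is usually cheaper to yield rows lazily. A minimal generator variant under the same assumptions (UTF-8 encoded file with a header row):

import csv

def iter_csv_rows(csv_file):
    """Yield each row of a CSV file as a dict without loading the whole file."""
    with open(csv_file, encoding='utf-8', newline='') as f:
        for row in csv.DictReader(f):
            yield row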
how to read .csv file in an efficient way?
python
def _readFile(self, fname, sldir): ''' Private method that reads in the data file and organizes it within this object. ''' if sldir.endswith('/'): fname = str(sldir)+str(fname) else: fname = str(sldir)+'/'+str(fname) f=open(fname,'r') # read header line line=f.readline() cols = [] ispec = 0 for i in range(1,len(line.split('|'))): col = line.split('|')[i].strip() if '-' in col: ispec += 1 col = col.split('-')[1] cols.append(col) col_num={} col_tot = len(cols) print('number of species: ', str(ispec)) print('number of cols: ', str(col_tot)) col_num={} for a,b in zip(cols,list(range(col_tot))): col_num[a]=b # read remainder of the file lines=f.readlines() data=[] for i in range(len(lines)): v=lines[i].split() vv=array(v,dtype='float') data.append(vv) ilines=i print("There are "+str(ilines)+" time steps found.") return data,col_num,cols,col_tot,ilines
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ppn.py#L150-L192
how to read .csv file in an efficient way?
python
def is_csv_file(inputfile):
    """ Return whether the provided file is a CSV file or not.

    This checks if the first row of the file can be splitted by ',' and
    if the resulting line contains more than 4 columns (Markers, linkage
    group, chromosome, trait).
    """
    try:
        stream = open(inputfile)
        row = stream.readline()
    except (IOError, UnicodeDecodeError):  # pragma: no cover
        return False
    finally:
        stream.close()
    content = row.strip().split(',')
    return inputfile.endswith('.csv') and len(content) >= 4
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/csv_plugin.py#L37-L52
how to read .csv file in an efficient way?
python
def _load_csv_file(csv_file): """ load csv file and check file content format @param csv_file: csv file path e.g. csv file content: username,password test1,111111 test2,222222 test3,333333 @return list of parameter, each parameter is in dict format e.g. [ {'username': 'test1', 'password': '111111'}, {'username': 'test2', 'password': '222222'}, {'username': 'test3', 'password': '333333'} ] """ csv_content_list = [] with io.open(csv_file, encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile) for row in reader: csv_content_list.append(row) return csv_content_list
https://github.com/RockFeng0/rtsf/blob/fbc0d57edaeca86418af3942472fcc6d3e9ce591/rtsf/p_common.py#L259-L284
how to read .csv file in an efficient way?
python
def read_csv_with_different_type(csv_name, column_types_dict, usecols=None):
    """Returns a DataFrame from a .csv file stored in /data/raw/.

    Reads the CSV as string.
    """
    csv_path = os.path.join(DATA_FOLDER, csv_name)
    csv = pd.read_csv(
        csv_path,
        usecols=usecols,
        encoding="utf-8",
        dtype=column_types_dict,
        engine="python",
    )

    for key_column, val_type in column_types_dict.items():
        if val_type == str:
            csv[key_column] = csv[key_column].str.strip()

    return csv
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/utils/read_csv.py#L18-L34
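Passing explicit dtypes, as above, spares pandas from re-inferring every column; for columns with few distinct values the 'category' dtype cuts memory further. A short sketch — the file name and column names below are placeholders, not taken from the project:

import pandas as pd

dtypes = {"user_id": "int64", "country": "category", "amount": "float32"}
df = pd.read_csv("events.csv", dtype=dtypes, usecols=list(dtypes))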
how to read .csv file in an efficient way?
python
def read(self, filename):
    """ Reads the file specified and tokenizes the data for parsing. """
    try:
        with open(filename, 'r') as _file:
            self._filename = filename
            self.readstream(_file)
            return True
    except IOError:
        self._filename = None
        return False
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/lexer.py#L199-L211
how to read .csv file in an efficient way?
python
def savecsv(self, filename): '''save waypoints to a file in human-readable CSV file''' f = open(filename, mode='w') headers = ["Seq", "Frame", "Cmd", "P1", "P2", "P3", "P4", "X", "Y", "Z"] print(self.csv_line(headers)) f.write(self.csv_line(headers) + "\n") for w in self.wploader.wpoints: if getattr(w, 'comment', None): # f.write("# %s\n" % w.comment) pass out_list = [ w.seq, self.pretty_enum_value('MAV_FRAME', w.frame), self.pretty_enum_value('MAV_CMD', w.command), self.pretty_parameter_value(w.param1), self.pretty_parameter_value(w.param2), self.pretty_parameter_value(w.param3), self.pretty_parameter_value(w.param4), self.pretty_parameter_value(w.x), self.pretty_parameter_value(w.y), self.pretty_parameter_value(w.z), ] print(self.csv_line(out_list)) f.write(self.csv_line(out_list) + "\n") f.close()
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_wp.py#L787-L810
how to read .csv file in an efficient way?
python
def read(self, source, compressed=False, encoding='UTF-8'): """ Iterates over a file in s3 split on newline. Yields a line in file. """ buf = '' for block in self.cat(source, compressed=compressed, encoding=encoding): buf += block if '\n' in buf: ret, buf = buf.rsplit('\n', 1) for line in ret.split('\n'): yield line lines = buf.split('\n') for line in lines[:-1]: yield line # only yield the last line if the line has content in it if lines[-1]: yield lines[-1]
https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/s3.py#L421-L442
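When the source hands back raw byte blocks, as the S3 reader above does, the buffering and the CSV parsing can be combined so that rows come out lazily. A rough sketch that feeds decoded lines straight into csv.reader; it assumes '\n'-terminated records and does not handle newlines inside quoted fields:

import csv

def rows_from_blocks(blocks, encoding='utf-8'):
    """Parse CSV rows from an iterable of byte blocks without materialising the file."""
    def lines():
        buf = ''
        for block in blocks:
            buf += block.decode(encoding)
            *complete, buf = buf.split('\n')
            for line in complete:
                yield line + '\n'
        if buf:
            yield buf
    return csv.reader(lines())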
how to read .csv file in an efficient way?
python
def read_file(self, infile):
    """Read a reST file into a string.
    """
    try:
        with open(infile, 'rt') as file:
            return file.read()
    except UnicodeDecodeError as e:
        err_exit('Error reading %s: %s' % (infile, e))
    except (IOError, OSError) as e:
        err_exit('Error reading %s: %s' % (infile, e.strerror or e))
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L294-L303
how to read .csv file in an efficient way?
python
def loadcsv(filename):
    """Load data from CSV file.

    Returns a single dict with column names as keys.
    """
    dataframe = _pd.read_csv(filename)
    data = {}
    for key, value in dataframe.items():
        data[key] = value.values
    return data
https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/io.py#L58-L67
how to read .csv file in an efficient way?
python
def readFile( self, fileName, buffer, numberOfBytesToRead, numberOfBytesRead, offset, dokanFileInfo, ): """Read a file. :param fileName: name of file to read :type fileName: ctypes.c_wchar_p :param buffer: buffer for content read :type buffer: ctypes.c_void_p :param numberOfBytesToRead: number of bytes to read :type numberOfBytesToRead: ctypes.c_ulong :param numberOfBytesRead: number of bytes read :type numberOfBytesRead: ctypes.POINTER(ctypes.c_ulong) :param offset: byte offset :type offset: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ try: ret = self.operations('readFile', fileName, numberOfBytesToRead, offset) data = ctypes.create_string_buffer( ret[:numberOfBytesToRead], numberOfBytesToRead ) ctypes.memmove(buffer, data, numberOfBytesToRead) sizeRead = ctypes.c_ulong(len(ret)) ctypes.memmove( numberOfBytesRead, ctypes.byref(sizeRead), ctypes.sizeof(ctypes.c_ulong) ) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS except Exception: # logging.error('%s', e) return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L222-L262
how to read .csv file in an efficient way?
python
def write_csv(path, data):
    """This function writes comma-separated <data> to <path>.

    Parameter <path> is either a pathname or a file-like object
    that supports the |write()| method."""
    fd = _try_open_file(path, 'w',
                        'The first argument must be a pathname or an object that supports write() method')
    for v in data:
        fd.write(",".join([str(x) for x in v]))
        fd.write("\n")
    _try_close_file(fd, path)
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/chart_data.py#L258-L268
how to read .csv file in an efficient way?
python
def read_csv_as_integer(csv_name, integer_columns, usecols=None):
    """Returns a DataFrame from a .csv file stored in /data/raw/.

    Converts columns specified by 'integer_columns' to integer.
    """
    csv_path = os.path.join(DATA_FOLDER, csv_name)
    csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols)
    for column in integer_columns:
        csv = csv[pd.to_numeric(csv[column], errors="coerce").notnull()]
    csv[integer_columns] = csv[integer_columns].apply(pd.to_numeric)
    return csv
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/utils/read_csv.py#L37-L46
how to read .csv file in an efficient way?
python
def _get_data_from_csv_files(self):
    """Get data from input csv files."""
    all_df = []
    for file_name in self._input_csv_files:
        with _util.open_local_or_gcs(file_name, mode='r') as f:
            all_df.append(pd.read_csv(f, names=self._headers))
    df = pd.concat(all_df, ignore_index=True)
    return df
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_metrics.py#L106-L114
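Collecting the frames and concatenating once, as above, already avoids the quadratic cost of appending row by row; passing a generator to pd.concat keeps the pattern compact. A sketch assuming an iterable of local paths:

import pandas as pd

def concat_csvs(paths, **read_kwargs):
    """Read several CSV files and stack them into a single DataFrame."""
    return pd.concat((pd.read_csv(p, **read_kwargs) for p in paths),
                     ignore_index=True)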
how to read .csv file in an efficient way?
python
def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- filename: str or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If filename is None, the generated CSV output is returned instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `filename` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `filename`. If `filename` is given, writes the output to the file and returns `None`. If `filename` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if filename is not None: # ensure parent directory of output file exits dirname = os.path.dirname(os.path.abspath(filename)) if not os.path.exists(dirname): os.makedirs(dirname) file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO() retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if filename is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L132-L206
how to read .csv file in an efficient way?
python
def read_file(filename):
    """ Reads the lines of a file into a list, and returns the list

    :param filename: String - path and name of the file
    :return: List - lines within the file
    """
    lines = []
    with open(filename) as f:
        for line in f:
            if len(line.strip()) != 0:
                lines.append(line.strip())
    return lines
https://github.com/NetworkEng/fping.py/blob/991507889561aa6eb9ee2ad821adf460883a9c5d/fping/fping.py#L214-L225
how to read .csv file in an efficient way?
python
def write_csv_to_file(d): """ Writes columns of data to a target CSV file. :param dict d: A dictionary containing one list for every data column. Keys: int, Values: list :return None: """ logger_csvs.info("enter write_csv_to_file") try: for filename, data in d.items(): try: l_columns = _reorder_csv(data, filename) rows = zip(*l_columns) with open(filename, 'w+') as f: w = csv.writer(f) for row in rows: row2 = decimal_precision(row) w.writerow(row2) except TypeError as e: print("Error: Unable to write values to CSV file, {}:\n" "(1) The data table may have 2 or more identical variables. Please correct the LiPD file manually\n" "(2) There may have been an error trying to prep the values for file write. The 'number' field in the data columns may be a 'string' instead of an 'integer' data type".format(filename)) print(e) except Exception as e: print("Error: CSV file not written, {}, {}:\n" "The data table may have 2 or more identical variables. Please correct the LiPD file manually".format(filename, e)) except AttributeError as e: logger_csvs.error("write_csv_to_file: Unable to write CSV File: {}".format(e, exc_info=True)) logger_csvs.info("exit write_csv_to_file") return
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L247-L277
how to read .csv file in an efficient way?
python
def read_file(self, file_path):
    """
    read file specified via 'file_path' and return its content - raises an ConuException
    if there is an issue accessing the file

    :param file_path: str, path to the file to read
    :return: str (not bytes), content of the file
    """
    try:
        with open(self.p(file_path)) as fd:
            return fd.read()
    except IOError as ex:
        logger.error("error while accessing file %s: %r", file_path, ex)
        raise ConuException("There was an error while accessing file %s: %r", file_path, ex)
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/apidefs/filesystem.py#L106-L119
how to read .csv file in an efficient way?
python
def read_file(filepath): """ Retrieves the contents of the specified file. This function performs simple caching so that the same file isn't read more than once per process. :param filepath: the file to read :type filepath: str :returns: str """ with _FILE_CACHE_LOCK: if filepath not in _FILE_CACHE: _FILE_CACHE[filepath] = _read_file(filepath) return _FILE_CACHE[filepath]
https://github.com/jayclassless/tidypy/blob/3c3497ca377fbbe937103b77b02b326c860c748f/src/tidypy/util.py#L221-L236
how to read .csv file in an efficient way?
python
def read_file(path): """Read the file from the given path. If ``path`` is an absolute path, reads a file from the local filesystem. For relative paths, read the file using the storage backend configured using :ref:`CA_FILE_STORAGE <settings-ca-file-storage>`. """ if os.path.isabs(path): with wrap_file_exceptions(): with open(path, 'rb') as stream: return stream.read() with wrap_file_exceptions(): stream = ca_storage.open(path) try: return stream.read() finally: stream.close()
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L714-L731
how to read .csv file in an efficient way?
python
def read_file(self, file_path): """ read file specified via 'file_path' and return its content - raises an ConuException if there is an issue accessing the file :param file_path: str, path to the file to read :return: str (not bytes), content of the file """ try: with open(self.cont_path(file_path)) as fd: return fd.read() except IOError as ex: logger.error("error while accessing file %s: %r", file_path, ex) raise ColinException( "There was an error while accessing file %s: %r" % (file_path, ex))
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/target.py#L169-L182
how to read .csv file in an efficient way?
python
def _read_file(self, filename):
    """
    Read contents of given file.
    """
    try:
        with open(filename, "r") as fhandle:
            stats = float(fhandle.readline().rstrip("\n"))
    except Exception:
        stats = None
    return stats
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/memory_lxc/memory_lxc.py#L76-L86
how to read .csv file in an efficient way?
python
def read_file(self, cfgparser, file): """Read configuration from file.""" if hasattr(file, 'readline'): # we have a file object if sys.version_info >= (3, 2): cfgparser.read_file(file) # Added in Python 3.2 else: cfgparser.readfp(file) # Deprecated since Python 3.2 else: # we have a file name cfgparser.read(file)
https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/config.py#L101-L113
how to read .csv file in an efficient way?
python
def parse_csv(file_stream, expected_columns=None): """ Parse csv file and return a stream of dictionaries representing each row. First line of CSV file must contain column headers. Arguments: file_stream: input file expected_columns (set[unicode]): columns that are expected to be present Yields: dict: CSV line parsed into a dictionary. """ reader = unicodecsv.DictReader(file_stream, encoding="utf-8") if expected_columns and set(expected_columns) - set(reader.fieldnames): raise ValidationError(ValidationMessages.MISSING_EXPECTED_COLUMNS.format( expected_columns=", ".join(expected_columns), actual_columns=", ".join(reader.fieldnames) )) # "yield from reader" would be nicer, but we're on python2.7 yet. for row in reader: yield row
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/utils.py#L101-L123
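On Python 3 the unicodecsv dependency is unnecessary; the same expected-column check works with the standard csv module. A minimal sketch, assuming a text-mode file object, with a plain ValueError standing in for the Django ValidationError used above:

import csv

def parse_csv(file_stream, expected_columns=None):
    """Yield each CSV row as a dict after checking that required columns are present."""
    reader = csv.DictReader(file_stream)
    missing = set(expected_columns or ()) - set(reader.fieldnames or ())
    if missing:
        raise ValueError("Missing expected columns: " + ", ".join(sorted(missing)))
    yield from reader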
how to read .csv file in an efficient way?
python
def read_csv(filename): """Pull locations from a user's CSV file. Read gpsbabel_'s CSV output format .. _gpsbabel: http://www.gpsbabel.org/ Args: filename (str): CSV file to parse Returns: tuple of dict and list: List of locations as ``str`` objects """ field_names = ('latitude', 'longitude', 'name') data = utils.prepare_csv_read(filename, field_names, skipinitialspace=True) locations = {} args = [] for index, row in enumerate(data, 1): name = '%02i:%s' % (index, row['name']) locations[name] = (row['latitude'], row['longitude']) args.append(name) return locations, args
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/edist.py#L520-L541
how to read .csv file in an efficient way?
python
def _read_file(folder, filename):
    '''
    Reads and returns the contents of a file
    '''
    path = os.path.join(folder, filename)
    try:
        with salt.utils.files.fopen(path, 'rb') as contents:
            return salt.utils.data.decode(contents.readlines())
    except (OSError, IOError):
        return ''
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/incron.py#L132-L141
how to read .csv file in an efficient way?
python
def read(filename,**kwargs): """ Read a generic input file into a recarray. Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat] Parameters: filename : input file name kwargs : keyword arguments for the reader Returns: recarray : data array """ base,ext = os.path.splitext(filename) if ext in ('.fits','.fz'): # Abstract fits here... return fitsio.read(filename,**kwargs) elif ext in ('.npy'): return np.load(filename,**kwargs) elif ext in ('.csv'): return np.recfromcsv(filename,**kwargs) elif ext in ('.txt','.dat'): return np.genfromtxt(filename,**kwargs) msg = "Unrecognized file type: %s"%filename raise ValueError(msg)
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/fileio.py#L20-L42
how to read .csv file in an efficient way?
python
def _get_csv_from_columns(table, filename, csvs): """ Search a data tables for column values. Return a dict of column values :param dict d: Table data :return dict: Column values. ref by var name """ csvs[filename] = OrderedDict() try: if "columns" in table: try: for _name, _column in table["columns"].items(): csvs[filename][_name] = {"number": _column["number"], "values": _column["values"]} except KeyError as ke: print("Error: get_csv_from_columns: {}, {}".format(filename, ke)) except Exception as e: print("Error: get_csv_from_columns: inner: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: inner: {}, {}".format(filename, e)) except Exception as e: print("Error: get_csv_from_columns: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: {}, {}".format(filename, e)) return csvs
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L389-L410
how to read .csv file in an efficient way?
python
def read_file(self, file: Union[IO, asyncio.StreamWriter]=None): '''Read from connection to file. Args: file: A file object or a writer stream. ''' if file: file_is_async = hasattr(file, 'drain') while True: data = yield from self._connection.read(4096) if not data: break if file: file.write(data) if file_is_async: yield from file.drain() self._data_event_dispatcher.notify_read(data)
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/ftp/stream.py#L43-L64
how to read .csv file in an efficient way?
python
def export_csv(self, filename, delimiter=',', line_terminator='\n', header=True, quote_level=csv.QUOTE_NONNUMERIC, double_quote=True, escape_char='\\', quote_char='\"', na_rep='', file_header='', file_footer='', line_prefix='', _no_prefix_on_first_value=False, **kwargs): """ Writes an SFrame to a CSV file. Parameters ---------- filename : string The location to save the CSV. delimiter : string, optional This describes the delimiter used for writing csv files. line_terminator: string, optional The newline character header : bool, optional If true, the column names are emitted as a header. quote_level: csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional The quoting level. If csv.QUOTE_ALL, every field is quoted. if csv.quote_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only non-numeric fileds are quoted. csv.QUOTE_MINIMAL is interpreted as csv.QUOTE_NONNUMERIC. double_quote : bool, optional If True, quotes are escaped as two consecutive quotes escape_char : string, optional Character which begins a C escape sequence quote_char: string, optional Character used to quote fields na_rep: string, optional The value used to denote a missing value. file_header: string, optional A string printed to the start of the file file_footer: string, optional A string printed to the end of the file line_prefix: string, optional A string printed at the start of each value line """ # Pandas argument compatibility if "sep" in kwargs: delimiter = kwargs['sep'] del kwargs['sep'] if "quotechar" in kwargs: quote_char = kwargs['quotechar'] del kwargs['quotechar'] if "doublequote" in kwargs: double_quote = kwargs['doublequote'] del kwargs['doublequote'] if "lineterminator" in kwargs: line_terminator = kwargs['lineterminator'] del kwargs['lineterminator'] if len(kwargs) > 0: raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys()))) write_csv_options = {} write_csv_options['delimiter'] = delimiter write_csv_options['escape_char'] = escape_char write_csv_options['double_quote'] = double_quote write_csv_options['quote_char'] = quote_char if quote_level == csv.QUOTE_MINIMAL: write_csv_options['quote_level'] = 0 elif quote_level == csv.QUOTE_ALL: write_csv_options['quote_level'] = 1 elif quote_level == csv.QUOTE_NONNUMERIC: write_csv_options['quote_level'] = 2 elif quote_level == csv.QUOTE_NONE: write_csv_options['quote_level'] = 3 write_csv_options['header'] = header write_csv_options['line_terminator'] = line_terminator write_csv_options['na_value'] = na_rep write_csv_options['file_header'] = file_header write_csv_options['file_footer'] = file_footer write_csv_options['line_prefix'] = line_prefix # undocumented option. Disables line prefix on the first value line write_csv_options['_no_prefix_on_first_value'] = _no_prefix_on_first_value url = _make_internal_url(filename) self.__proxy__.save_as_csv(url, write_csv_options)
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L2828-L2917
how to read .csv file in an efficient way?
python
def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\n', encoding='utf-8', skiprows=0): """fileobj can be a StringIO in Py3, but should be a BytesIO in Py2.""" # Python 3 version if sys.version_info[0] >= 3: # Next, get the csv reader, with unicode delimiter and quotechar csv_reader = csv.reader(fileobj, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator) # Now, return the (already decoded) unicode csv_reader generator # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield row # Python 2 version else: # Next, get the csv reader, passing delimiter and quotechar as # bytestrings rather than unicode csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator) # Iterate over the file and decode each string into unicode # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield [cell.decode(encoding) for cell in row]
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L113-L141
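On Python 3 only the first branch is needed: opening the file in text mode with the desired encoding and newline='' (so the csv module handles embedded newlines itself) covers both cases. A small sketch:

import csv
import itertools

def read_unicode_csv(path, delimiter=',', quotechar='"', encoding='utf-8', skiprows=0):
    """Yield decoded CSV rows, optionally skipping a number of leading rows."""
    with open(path, newline='', encoding=encoding) as f:
        reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
        yield from itertools.islice(reader, skiprows, None)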
how to read .csv file in an efficient way?
python
def read_vcf(vcf_file):
    """
    Read a vcf file to a dict of lists.

    :param str vcf_file: Path to a vcf file.
    :return: dict of lists of vcf records
    :rtype: dict
    """
    vcf_dict = []
    with open(vcf_file, 'r') as invcf:
        for line in invcf:
            if line.startswith('#'):
                continue
            line = line.strip().split()
            vcf_dict.append((line[0], line[1], line[3], line[4]))
    return vcf_dict
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L146-L161
how to read .csv file in an efficient way?
python
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False): """ Write csv that are aligned with the column headers. >>> header = ["x_value", "y_value"] >>> contents = [(1, 100), (2, 200)] >>> write_csv(header, contents) x_value, y_value 1, 100 2, 200 """ from jcvi.formats.base import must_open formatted = load_csv(header, contents, sep=sep, thousands=thousands, align=align) if comment: formatted[0] = '#' + formatted[0][1:] formatted = "\n".join(formatted) fw = must_open(filename, "w") print(formatted, file=fw) if tee and filename != "stdout": print(formatted)
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/table.py#L111-L133
how to read .csv file in an efficient way?
python
def read(path, encoding="utf-8"): """Read the content of the file. Args: path (str): Path to the file encoding (str): File encoding. Default: utf-8 Returns: str: File content or empty string if there was an error """ try: with io.open(path, encoding=encoding) as f: return f.read() except Exception as e: logger.error("read: %s failed. Error: %s", path, e) return ""
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L381-L396
how to read .csv file in an efficient way?
python
def read_file(name):
    """Read file name (without extension) to string."""
    cur_path = os.path.dirname(__file__)
    exts = ('txt', 'rst')
    for ext in exts:
        path = os.path.join(cur_path, '.'.join((name, ext)))
        if os.path.exists(path):
            with open(path, 'rt') as file_obj:
                return file_obj.read()
    return ''
https://github.com/ryan-roemer/sphinx-bootstrap-theme/blob/69585281a300116fa9da37c29c333ab1cc5462ce/setup.py#L21-L31
how to read .csv file in an efficient way?
python
def write(self, iterable): """Writes values from iterable into CSV file""" io_error_text = _("Error writing to file {filepath}.") io_error_text = io_error_text.format(filepath=self.path) try: with open(self.path, "wb") as csvfile: csv_writer = csv.writer(csvfile, self.dialect) for line in iterable: csv_writer.writerow( list(encode_gen(line, encoding=self.encoding))) except IOError: txt = \ _("Error opening file {filepath}.").format(filepath=self.path) try: post_command_event(self.main_window, self.StatusBarMsg, text=txt) except TypeError: # The main window does not exist any more pass return False
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/__csv.py#L434-L459
how to read .csv file in an efficient way?
python
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:
    """
    Write a line of CSV. POOR; does not escape things properly. DEPRECATED.

    Args:
        filehandle: file to write to
        values: values
    """
    line = ",".join(values)
    filehandle.write(line + "\n")
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L59-L68
how to read .csv file in an efficient way?
python
def from_csv(filename_or_buffer, copy_index=True, **kwargs):
    """Shortcut to read a csv file using pandas and convert to a DataFrame directly.

    :rtype: DataFrame
    """
    import pandas as pd
    return from_pandas(pd.read_csv(filename_or_buffer, **kwargs), copy_index=copy_index)
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/__init__.py#L407-L413
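When a file is too large to hold in memory, pandas can also stream it in chunks and aggregate incrementally. A sketch with the path, chunk size and column name as placeholders:

import pandas as pd

total = 0
for chunk in pd.read_csv("big_file.csv", chunksize=100_000):
    # each chunk is an ordinary DataFrame; process it and discard it
    total += chunk["value"].sum()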
how to read .csv file in an efficient way?
python
def read_table(filename, usecols=(0, 1), sep='\t', comment='#', encoding='utf-8', skip=0): """Parse data files from the data directory Parameters ---------- filename: string Full path to file usecols: list, default [0, 1] A list of two elements representing the columns to be parsed into a dictionary. The first element will be used as keys and the second as values. Defaults to the first two columns of `filename`. sep : string, default '\t' Field delimiter. comment : str, default '#' Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. encoding : string, default 'utf-8' Encoding to use for UTF when reading/writing (ex. `utf-8`) skip: int, default 0 Number of lines to skip at the beginning of the file Returns ------- A dictionary with the same length as the number of lines in `filename` """ with io.open(filename, 'r', encoding=encoding) as f: # skip initial lines for _ in range(skip): next(f) # filter comment lines lines = (line for line in f if not line.startswith(comment)) d = dict() for line in lines: columns = line.split(sep) key = columns[usecols[0]].lower() value = columns[usecols[1]].rstrip('\n') d[key] = value return d
https://github.com/elyase/geotext/blob/21a8a7f5eebea40f270beef9ede4d9a57e3c81c3/geotext/geotext.py#L15-L60