query
stringlengths 9
60
| language
stringclasses 1
value | code
stringlengths 105
25.7k
| url
stringlengths 91
217
|
---|---|---|---|
read text file line by line
|
python
|
def _ReadLine(self, text_file_object, max_len=None, depth=0):
"""Reads a line from a text file.
Args:
text_file_object (dfvfs.TextFile): text file.
max_len (Optional[int]): maximum number of bytes a single line can take,
where None means all remaining bytes should be read.
depth (Optional[int]): number of new lines the parser encountered.
Returns:
str: single line read from the file-like object, or the maximum number of
characters, if max_len defined and line longer than the defined size.
Raises:
UnicodeDecodeError: if the text cannot be decoded using the specified
encoding.
"""
line = text_file_object.readline(size=max_len)
if not line:
return ''
if line in self._EMPTY_LINES:
if depth == self._MAXIMUM_DEPTH:
return ''
return self._ReadLine(text_file_object, max_len=max_len, depth=depth + 1)
return line.strip()
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L315-L343
|
read text file line by line
|
python
|
def readline(self):
    """
    Read and return a line of text.

    :rtype: str
    :return: the next line of text in the file, including the
      newline character
    """
    _complain_ifclosed(self.closed)
    raw = self.f.readline()
    # Only decode when an encoding was configured for this file.
    if not self.__encoding:
        return raw
    return raw.decode(self.__encoding, self.__errors)
|
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L107-L120
|
read text file line by line
|
python
|
def _ReadLine(self, file_object):
"""Reads a line from the file object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the file-like object.
"""
if len(self._buffer) < self._buffer_size:
content = file_object.read(self._buffer_size)
content = content.decode(self._encoding)
self._buffer = ''.join([self._buffer, content])
line, new_line, self._buffer = self._buffer.partition('\n')
if not line and not new_line:
line = self._buffer
self._buffer = ''
self._current_offset += len(line)
# Strip carriage returns from the text.
if line.endswith('\r'):
line = line[:-len('\r')]
if new_line:
line = ''.join([line, '\n'])
self._current_offset += len('\n')
return line
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L487-L516
|
read text file line by line
|
python
|
def ReadLine(self, file_object):
  """Reads a line.

  Args:
    file_object (dfvfs.FileIO): file-like object.

  Returns:
    str: line read from the lines buffer.
  """
  line, _, remainder = self.lines.partition('\n')
  self.lines = remainder
  if line:
    return line

  # The buffer was exhausted: refill it and try once more.
  self.ReadLines(file_object)
  line, _, self.lines = self.lines.partition('\n')
  return line
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L518-L532
|
read text file line by line
|
python
|
def readline(self):
    """Read a chunk of the output"""
    _LOGGER.info("reading line")
    chunk = self.read(self.line_length)
    # A short read signals that the stream is exhausted.
    if len(chunk) < self.line_length:
        _LOGGER.info("all lines read")
    return chunk
|
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/streaming_client.py#L276-L282
|
read text file line by line
|
python
|
def readlines(filepath):
    """
    read lines from a textfile
    :param filepath:
    :return: list[line]
    """
    with open(filepath, 'rt') as fin:
        # Strip every line, then drop the ones that end up empty.
        stripped = (line.strip() for line in fin)
        return [line for line in stripped if line]
|
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L32-L42
|
read text file line by line
|
python
|
def read(self, source, compressed=False, encoding='UTF-8'):
    """
    Iterates over a file in s3 split on newline.

    Yields a line in file.
    """
    pending = ''
    for block in self.cat(source, compressed=compressed, encoding=encoding):
        pending += block
        if '\n' not in pending:
            continue
        # Keep everything after the last newline as the partial tail.
        complete, pending = pending.rsplit('\n', 1)
        for line in complete.split('\n'):
            yield line

    # Flush whatever is left once the final block has been consumed.
    tail = pending.split('\n')
    for line in tail[:-1]:
        yield line
    # only yield the last line if the line has content in it
    if tail[-1]:
        yield tail[-1]
|
https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/s3.py#L421-L442
|
read text file line by line
|
python
|
def readline(file, skip_blank=False):
    """Read a line from provided file, skipping any blank or comment lines"""
    while True:
        line = file.readline()
        if not line:
            # End of file reached without a usable line.
            return None
        is_comment = line[0] == '#'
        is_blank = skip_blank and line.isspace()
        if not (is_comment or is_blank):
            return line
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L203-L210
|
read text file line by line
|
python
|
def read_line(self):
    """ Consume one line from the stream. """
    while True:
        idx = self.buf.find(b"\n")
        if idx < 0:
            # No complete line buffered yet: pull more data from the socket.
            chunk = self.f.recv(4096)
            if not chunk:
                raise EndOfStreamError()
            self.buf += chunk
            continue
        line, self.buf = self.buf[:idx], self.buf[idx + 1:]
        return line
|
https://github.com/niklasb/webkit-server/blob/c9e3a8394b8c51000c35f8a56fb770580562b544/webkit_server.py#L475-L486
|
read text file line by line
|
python
|
def readline(self, size=None):
  """Reads a single line of text.

  The function reads one entire line from the file-like object. A trailing
  end-of-line indicator (newline by default) is kept in the byte string (but
  may be absent when a file ends with an incomplete line). An empty byte
  string is returned only when end-of-file is encountered immediately.

  Args:
    size (Optional[int]): maximum byte size to read. If present and
        non-negative, it is a maximum byte count (including the trailing
        end-of-line) and an incomplete line may be returned.

  Returns:
    bytes: line of text.

  Raises:
    ValueError: if the specified size is less than zero or greater
        than the maximum size allowed.
  """
  if size is not None and size < 0:
    raise ValueError('Invalid size value smaller than zero.')

  if size is not None and size > self.MAXIMUM_READ_BUFFER_SIZE:
    raise ValueError(
        'Invalid size value exceeds maximum value {0:d}.'.format(
            self.MAXIMUM_READ_BUFFER_SIZE))

  if not self._lines:
    if self._lines_buffer_offset >= self._file_object_size:
      return b''

    read_size = size
    if not read_size:
      read_size = self.MAXIMUM_READ_BUFFER_SIZE

    # Clamp the read so it stops at the tracked end of the file. The clamp
    # must apply to read_size: assigning it to size (as before) both left
    # the actual read unclamped — consuming data beyond _file_object_size —
    # and clobbered the caller-supplied maximum line size.
    if self._lines_buffer_offset + read_size > self._file_object_size:
      read_size = self._file_object_size - self._lines_buffer_offset

    self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET)
    read_buffer = self._file_object.read(read_size)

    self._lines_buffer_offset += len(read_buffer)

    self._lines = read_buffer.split(self.end_of_line)
    if self._lines_buffer:
      # Prepend the partial line left over from the previous read.
      self._lines[0] = b''.join([self._lines_buffer, self._lines[0]])
      self._lines_buffer = b''

    # NOTE(review): this suffix check compares read_buffer[len(eol):] with
    # the end-of-line marker and is therefore almost always true, so the
    # trailing (possibly empty) partial element is moved to the buffer;
    # behavior preserved as-is.
    if read_buffer[self._end_of_line_length:] != self.end_of_line:
      self._lines_buffer = self._lines.pop()

    for index, line in enumerate(self._lines):
      # Re-attach the end-of-line marker that split() removed.
      self._lines[index] = b''.join([line, self.end_of_line])

    if (self._lines_buffer and
        self._lines_buffer_offset >= self._file_object_size):
      # File ends without a final end-of-line: emit the remainder as-is.
      self._lines.append(self._lines_buffer)
      self._lines_buffer = b''

  if not self._lines:
    line = self._lines_buffer
    self._lines_buffer = b''
  elif not size or size >= len(self._lines[0]):
    line = self._lines.pop(0)
  else:
    # A size cap splits the first buffered line; keep the tail for later.
    line = self._lines[0]
    self._lines[0] = line[size:]
    line = line[:size]

  self._current_offset += len(line)
  return line
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/line_reader_file.py#L63-L137
|
read text file line by line
|
python
|
def readlines(filename, encoding='utf-8'):
    """
    Read lines from file ('filename')
    Return lines and encoding
    """
    contents, used_encoding = read(filename, encoding)
    # Split on the platform line separator, mirroring how read() returns text.
    return contents.split(os.linesep), used_encoding
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/encoding.py#L260-L266
|
read text file line by line
|
python
|
def read_line(self, fid):
    """Read a line from a file string and check it isn't either empty or commented before returning."""
    lin = fid.readline().strip()
    while lin.startswith('#'):
        lin = fid.readline().strip()
        # An empty (or EOF) line terminates the scan immediately.
        if lin == '':
            return lin
    return lin
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/mocap.py#L514-L521
|
read text file line by line
|
python
|
def _read_line(self, f):
"""
Reads one non empty line (if it's a comment, it skips it).
"""
l = f.readline().strip()
while l == "" or l[0] == "#": # comment or an empty line
l = f.readline().strip()
return l
|
https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/meshio.py#L1718-L1725
|
read text file line by line
|
python
|
def read_line(self):
    """
    Interrupted respecting reader for stdin.

    Raises EOFError if the end of stream has been reached
    """
    try:
        line = self.inp.readline().strip()
    except KeyboardInterrupt:
        raise EOFError()
    # i3status sends EOF, or an empty line
    if line:
        return line
    raise EOFError()
|
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/io.py#L31-L46
|
read text file line by line
|
python
|
def readline(self, size=None):
  """Reads a single line of text.

  The functions reads one entire line from the file-like object. A trailing
  end-of-line indicator (newline by default) is kept in the string (but may
  be absent when a file ends with an incomplete line). An empty string is
  returned only when end-of-file is encountered immediately.

  Args:
    size (Optional[int]): maximum byte size to read. If present and
        non-negative, it is a maximum byte count (including the trailing
        end-of-line) and an incomplete line may be returned.

  Returns:
    str: line of text.

  Raises:
    UnicodeDecodeError: if a line cannot be decoded.
    ValueError: if the size is smaller than zero or exceeds the maximum
        (as defined by _MAXIMUM_READ_BUFFER_SIZE).
  """
  if size is not None and size < 0:
    raise ValueError('Invalid size value smaller than zero.')

  if size is not None and size > self._MAXIMUM_READ_BUFFER_SIZE:
    raise ValueError('Invalid size value exceeds maximum.')

  if not self._lines:
    # All buffered lines were consumed; read the next chunk of the file.
    if self._lines_buffer_offset >= self._file_object_size:
      # End of file: nothing left to read.
      return ''

    read_size = size
    if not read_size:
      read_size = self._MAXIMUM_READ_BUFFER_SIZE

    # Clamp the read so it does not run past the tracked file size.
    if self._lines_buffer_offset + read_size > self._file_object_size:
      read_size = self._file_object_size - self._lines_buffer_offset

    self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET)
    read_buffer = self._file_object.read(read_size)

    self._lines_buffer_offset += len(read_buffer)

    self._lines = read_buffer.split(self._end_of_line)
    if self._lines_buffer:
      # Prepend the partial line left over from the previous read.
      self._lines[0] = b''.join([self._lines_buffer, self._lines[0]])
      self._lines_buffer = b''

    # Move a partial line from the lines list to the lines buffer.
    # NOTE(review): this compares read_buffer[len(eol):] against the
    # end-of-line marker, which is almost always unequal, so the trailing
    # (possibly empty) element is moved to the buffer; confirm upstream
    # whether a negative index (read_buffer[-len(eol):]) was intended.
    if read_buffer[self._end_of_line_length:] != self._end_of_line:
      self._lines_buffer = self._lines.pop()

    for index, line in enumerate(self._lines):
      # Re-attach the end-of-line marker that split() removed.
      self._lines[index] = b''.join([line, self._end_of_line])

    if (self._lines_buffer and
        self._lines_buffer_offset >= self._file_object_size):
      # The file ends without a final end-of-line: emit the remainder as-is.
      self._lines.append(self._lines_buffer)
      self._lines_buffer = b''

  if not self._lines:
    line = self._lines_buffer
    self._lines_buffer = b''
  elif not size or size >= len(self._lines[0]):
    line = self._lines.pop(0)
  else:
    # A size cap splits the first buffered line; keep the tail for later.
    line = self._lines[0]
    self._lines[0] = line[size:]
    line = line[:size]

  last_offset = self._current_offset
  self._current_offset += len(line)

  decoded_line = line.decode(self._encoding)

  # Remove a byte-order mark at the start of the file.
  # NOTE(review): assumes the first read always yields a non-empty line;
  # decoded_line[0] would raise IndexError on an empty string.
  if last_offset == 0 and decoded_line[0] == '\ufeff':
    decoded_line = decoded_line[1:]

  return decoded_line
|
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/text_file.py#L63-L144
|
read text file line by line
|
python
|
def read_lines_from_file(file_path: str) -> List[str]:
    """ Read text lines from a file """
    # check if the file exists?
    with open(file_path) as csv_file:
        return csv_file.readlines()
|
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/utils.py#L5-L10
|
read text file line by line
|
python
|
def read_text(self, encoding=None, errors=None, newline=None):
    """Read this path as one large chunk of text.

    This function reads in the entire file as one big piece of text and
    returns it. The *encoding*, *errors*, and *newline* keywords are
    passed to :meth:`open`.

    This is not a good way to read files unless you know for sure that they
    are small.
    """
    handle = self.open(mode='rt', encoding=encoding, errors=errors, newline=newline)
    with handle as f:
        return f.read()
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L964-L976
|
read text file line by line
|
python
|
def read_line(self):
    """Read data until \n is found

    Generator-based coroutine: delegates to self.read_byte() one byte at a
    time (via ``yield from``) until a newline is seen or self.eof is set.

    Returns:
        bytes: the accumulated line with all newline characters removed,
        or b'' if end-of-stream was reached while reading.
    """
    buf = b''
    # Accumulate bytes until the line is newline-terminated or EOF is hit.
    while not self.eof and buf.endswith(b'\n') is False:
        buf += yield from self.read_byte()
    if self.eof:
        # A partial line at EOF is discarded rather than returned.
        buf = b''
    # Remove \n character
    buf = buf.replace(b'\n', b'')
    return buf
|
https://github.com/rmb938/vmw-cloudinit-metadata/blob/b667b2a0e10e11dbd6cf058d9b5be70b97b7950e/vmw_cloudinit_metadata/vspc/async_telnet.py#L172-L185
|
read text file line by line
|
python
|
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, 'r') as fh:
return fh.readlines()
lines = []
with open(filename, 'r') as fh:
for lineno, line in enumerate(fh):
if lineno == nlines: break
lines.append(line)
return lines
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L50-L64
|
read text file line by line
|
python
|
def readline(self):
    """Reads (and optionally parses) a single line."""
    raw = self.file.readline()
    # Without a grammar (or at EOF) the raw line is returned unparsed.
    if not (self.grammar and raw):
        return raw
    try:
        return self.grammar.parseString(raw).asDict()
    except ParseException:
        # Unparseable line: skip it and try the next one.
        return self.readline()
|
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/sparser.py#L282-L291
|
read text file line by line
|
python
|
def read_txt_file(filepath):
    """read text from `filepath` and return its lines

    Returns:
        list[str]: the lines of the file (newline characters are in fact
        preserved by readlines()).
    """
    # sys.version is a display string ('3.12.1 ...'); comparing it
    # lexicographically against '3' misclassifies Python 10+ ('10...' < '3')
    # and is fragile. Use the structured sys.version_info instead.
    if sys.version_info[0] >= 3:
        with open(filepath, 'r', encoding='utf-8') as txt_file:
            return txt_file.readlines()
    with open(filepath) as txt_file:
        return txt_file.readlines()
|
https://github.com/fbngrm/babelpy/blob/ff305abecddd66aed40c32f0010485cf192e5f17/babelpy/reader.py#L3-L11
|
read text file line by line
|
python
|
def _read_file(self, filename):
"""Return the lines from the given file, ignoring lines that start with
comments"""
result = []
with open(filename, 'r') as f:
lines = f.read().split('\n')
for line in lines:
nocomment = line.strip().split('#')[0].strip()
if nocomment:
result.append(nocomment)
return result
|
https://github.com/pglass/nose-blacklist/blob/68e340ecea45d98e3a5ebf8aa9bf7975146cc20d/noseblacklist/plugin.py#L71-L81
|
read text file line by line
|
python
|
def __read_line(self):
    """Read one line from the server.

    An internal buffer is used to read data from the server
    (blocks of Client.read_size bytes). If the buffer
    is not empty, we try to find an entire line to return.

    If we failed, we try to read new content from the server for
    Client.read_timeout seconds. If no data can be
    retrieved, it is considered as a fatal error and an 'Error'
    exception is raised.

    :rtype: string
    :return: the read line
    """
    ret = b""
    while True:
        try:
            # A full line is already buffered: slice it off (without CRLF).
            pos = self.__read_buffer.index(CRLF)
            ret = self.__read_buffer[:pos]
            self.__read_buffer = self.__read_buffer[pos + len(CRLF):]
            break
        except ValueError:
            # No CRLF buffered yet; fall through and read more data.
            pass
        try:
            nval = self.sock.recv(self.read_size)
            self.__dprint(nval)
            if not len(nval):
                # Peer closed the connection: give up with what we have.
                break
            self.__read_buffer += nval
        except (socket.timeout, ssl.SSLError):
            raise Error("Failed to read data from the server")

    if len(ret):
        # Literal ({n}) and response-code lines are reported to the caller
        # via exceptions instead of being returned as data.
        m = self.__size_expr.match(ret)
        if m:
            raise Literal(int(m.group(1)))
        m = self.__respcode_expr.match(ret)
        if m:
            if m.group(1) == b"BYE":
                raise Error("Connection closed by server")
            if m.group(1) == b"NO":
                self.__parse_error(m.group(2))
            raise Response(m.group(1), m.group(2))
    return ret
|
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/managesieve.py#L136-L181
|
read text file line by line
|
python
|
def readlines(p,
              encoding=None,
              strip=False,
              comment=None,
              normalize=None,
              linenumbers=False):
    """
    Read a `list` of lines from a text file.

    :param p: File path (or `list` or `tuple` of text)
    :param encoding: Registered codec.
    :param strip: If `True`, strip leading and trailing whitespace.
    :param comment: String used as syntax to mark comment lines. When not `None`, \
    commented lines will be stripped. This implies `strip=True`.
    :param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD'
    :param linenumbers: return also line numbers.
    :return: `list` of text lines or pairs (`int`, text or `None`).
    """
    # Comment filtering only makes sense on stripped lines.
    strip = strip or bool(comment)
    if isinstance(p, (list, tuple)):
        lines = [item.decode(encoding) if encoding else item for item in p]
    else:
        with Path(p).open(encoding=encoding or 'utf-8') as fp:
            lines = fp.readlines()
    if strip:
        # Blank lines become None so line numbering can still account for them.
        lines = [line.strip() or None for line in lines]
    if comment:
        lines = [None if line and line.startswith(comment) else line
                 for line in lines]
    if normalize:
        lines = [unicodedata.normalize(normalize, line) if line else line
                 for line in lines]
    if linenumbers:
        return list(enumerate(lines, 1))
    return [line for line in lines if line is not None]
|
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/path.py#L96-L129
|
read text file line by line
|
python
|
def read_lines(self, mode='rt', noexistok=False, **kwargs):
    """Generate a sequence of lines from the file pointed to by this path, by
    opening as a regular file and iterating over it. The lines therefore
    contain their newline characters. If *noexistok*, a nonexistent file
    will result in an empty sequence rather than an exception. *kwargs*
    are passed to :meth:`Path.open`.
    """
    try:
        with self.open(mode=mode, **kwargs) as stream:
            for text_line in stream:
                yield text_line
    except IOError as e:
        # Errno 2 (ENOENT) is tolerated only when noexistok is set.
        if e.errno != 2 or not noexistok:
            raise
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L803-L817
|
read text file line by line
|
python
|
def readline(self, max_len=-1):
    """read from the file until a newline is encountered

    .. note::

        this method will block if there isn't already a full line available
        from the data source

    :param max_len: stop reading a single line after this many bytes
    :type max_len: int

    :returns:
        a string of the line it read from the file, including the newline
        at the end
    """
    buf = self._rbuf
    newline, chunksize = self.NEWLINE, self.CHUNKSIZE
    buf.seek(0)
    text = buf.read()
    # Fast path: the already-buffered text alone satisfies max_len; return
    # exactly max_len bytes and keep the remainder buffered.
    if len(text) >= max_len >= 0:
        buf.seek(0)
        buf.truncate()
        buf.write(text[max_len:])
        return text[:max_len]
    while text.find(newline) < 0:
        text = self._read_chunk(chunksize)
        if text is None:
            # NOTE(review): presumably a would-block indication from
            # _read_chunk — retry; confirm against _read_chunk's contract.
            text = ''
            continue
        if buf.tell() + len(text) >= max_len >= 0:
            # The cap is reached mid-read: return exactly max_len bytes and
            # keep the remainder buffered.
            text = buf.getvalue() + text
            buf.seek(0)
            buf.truncate()
            buf.write(text[max_len:])
            return text[:max_len]
        if not text:
            # Empty chunk: end of file before any newline appeared.
            break
        buf.write(text)
    else:
        # found a newline
        rc = buf.getvalue()
        index = rc.find(newline) + len(newline)
        buf.seek(0)
        buf.truncate()
        buf.write(rc[index:])
        return rc[:index]
    # hit the end of the file, no more newlines
    rc = buf.getvalue()
    buf.seek(0)
    buf.truncate()
    return rc
|
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/files.py#L93-L147
|
read text file line by line
|
python
|
def readLine(self):
    """ The method that reads a line and processes it.

    Reads one line from self._f (decoded as ASCII, ignoring errors),
    dispatches on its leading keyword, and appends the parsed result to the
    matching accumulator list. Raises EOFError at end of file.
    """
    # Read line
    line = self._f.readline().decode('ascii', 'ignore')
    if not line:
        # readline() returns '' only at end of file.
        raise EOFError()
    line = line.strip()

    if line.startswith('v '):
        # 'v' records parsed via readTuple into self._v.
        # self._vertices.append( *self.readTuple(line) )
        self._v.append(self.readTuple(line))
    elif line.startswith('vt '):
        # 'vt' records: up to 3 components are read here.
        self._vt.append(self.readTuple(line, 3))
    elif line.startswith('vn '):
        # 'vn' records parsed via readTuple into self._vn.
        self._vn.append(self.readTuple(line))
    elif line.startswith('f '):
        # Face records parsed via readFace.
        self._faces.append(self.readFace(line))
    elif line.startswith('#'):
        pass  # Comment
    elif line.startswith('mtllib '):
        logger.warning('Notice reading .OBJ: material properties are '
                       'ignored.')
    elif any(line.startswith(x) for x in ('g ', 's ', 'o ', 'usemtl ')):
        pass  # Ignore groups and smoothing groups, obj names, material
    elif not line.strip():
        pass
    else:
        # Anything unrecognized is reported but otherwise skipped.
        logger.warning('Notice reading .OBJ: ignoring %s command.'
                       % line.strip())
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/io/wavefront.py#L87-L117
|
read text file line by line
|
python
|
def readtxt(filepath):
    """ read file as is"""
    with open(filepath, 'rt') as handle:
        # Joining the individual lines reproduces the file content verbatim.
        return ''.join(handle.readlines())
|
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L52-L56
|
read text file line by line
|
python
|
def readline(self, size=None):
    """Read a single line of text with EOF.

    Note: the `size` argument is accepted for file-API compatibility but is
    not used by this implementation.
    """
    # TODO: Add support for Unicode.
    # Delegate the newline scan to the internal buffer, which refills itself
    # via __retrieve_data; nl is the newline sequence that terminated the
    # line (None when the line was not newline-terminated).
    (line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
    # NOTE(review): presumably mirrors the universal-newlines bookkeeping of
    # the stdlib file API (the `newlines` attribute) — confirm.
    if self.__sf.access_type_has_universal_nl and nl is not None:
        self.__newlines[nl] = True
    return line
|
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L960-L969
|
read text file line by line
|
python
|
def read(self, file, *, fs):
    """
    Yield each line of the given file with the trailing end-of-line
    (``self.eol``) stripped from the right. The *fs* argument is part of
    the node interface but unused here.
    """
    for raw in file:
        yield raw.rstrip(self.eol)
|
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/nodes/io/file.py#L60-L66
|
read text file line by line
|
python
|
def readline(self, size=None):
    '''Next line from the decrypted file, as a string.

    Retain newline. A non-negative size argument limits the maximum
    number of bytes to return (an incomplete line may be returned then).
    Return an empty string at EOF.'''
    if self.closed:
        raise ValueError('file closed')
    if self._mode in _allowed_write:
        raise Exception('file opened for write only')
    if self._read_finished:
        return None
    line = b''
    # Stop once `size` bytes are collected; the previous condition
    # (len(line) <= size) read one byte too many, returning size + 1 bytes
    # and violating the documented maximum.
    while (not line.endswith(b'\n') and not self._read_finished
           and (size is None or len(line) < size)):
        line += self.read(1)
    return line
|
https://github.com/ricmoo/pyscrypt/blob/131ca39acee4963afd704b4c4631497e4fe34c97/pyscrypt/file.py#L276-L291
|
read text file line by line
|
python
|
def readline(self, size=None):
    """Reads one line from the stream."""
    if self._pos >= self.limit:
        # Past the enforced limit: delegate to the exhaustion handler.
        return self.on_exhausted()
    if size is None:
        # Never read beyond the remaining budget up to the limit.
        size = self.limit - self._pos
    else:
        size = min(size, self.limit - self._pos)
    try:
        line = self._readline(size)
    except (ValueError, IOError):
        return self.on_disconnect()
    if size and not line:
        # A non-zero request that returns nothing means the peer hung up.
        return self.on_disconnect()
    self._pos += len(line)
    return line
|
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_wsgi.py#L119-L134
|
read text file line by line
|
python
|
def read(self):
    """
    Read in all unread lines and return them as a single string.
    """
    pending = self.readlines()
    if not pending:
        return None
    try:
        return ''.join(pending)
    except TypeError:
        # Mixed bytes/str content: coerce each line to text first.
        return ''.join(force_text(entry) for entry in pending)
|
https://github.com/bgreenlee/pygtail/blob/d2caeb6fece041d5d6c5ecf600dc5a9e46c8d890/pygtail/core.py#L142-L153
|
read text file line by line
|
python
|
def read_lines(self, file_path, empty_lines=False, signal_ready=True):
    """Fetch lines from file.

    In case the file handler changes (logrotate), reopen the file.

    :param file_path: Path to file
    :param empty_lines: Return empty lines
    :param signal_ready: Report signal ready on start
    """
    file_handler, file_id = self._get_file(file_path)
    # Start tailing from the current end of the file.
    file_handler.seek(0, os.SEEK_END)

    if signal_ready:
        self.signal_ready()

    # Infinite tail loop; ends only when the server thread stops.
    while self.thread_server.is_alive():
        line = six.text_type(file_handler.readline(), "utf-8")
        if line:
            yield line
            continue
        elif empty_lines:
            yield line

        # No new data: back off briefly, then check for rotation.
        time.sleep(0.1)
        # NOTE(review): _get_file_id presumably derives an identity from
        # os.stat (e.g. inode/device) — reopen when it changed but the path
        # still exists; confirm _get_file_id's contract.
        if file_id != self._get_file_id(os.stat(file_path)) and os.path.isfile(file_path):
            file_handler, file_id = self._get_file(file_path)
|
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/servicemanager/base_service.py#L171-L197
|
read text file line by line
|
python
|
def _readline(sock, buf):
    """Read line of text from the socket.

    Read a line of text (delimited by "\r\n") from the socket, and
    return that line along with any trailing characters read from the
    socket.

    Args:
      sock: Socket object, should be connected.
      buf: String, zero or more characters, returned from an earlier
        call to _readline or _readvalue (pass an empty string on the
        first call).

    Returns:
      A tuple of (buf, line) where line is the full line read from the
      socket (minus the "\r\n" characters) and buf is any trailing
      characters read after the "\r\n" was found (which may be an empty
      string).
    """
    chunks = []
    last_char = b''

    while True:
        # We're reading in chunks, so "\r\n" could appear in one chunk,
        # or across the boundary of two chunks, so we check for both
        # cases.

        # This case must appear first, since the buffer could have
        # later \r\n characters in it and we want to get the first \r\n.
        if last_char == b'\r' and buf[0:1] == b'\n':
            # Strip the last character from the last chunk.
            chunks[-1] = chunks[-1][:-1]
            return buf[1:], b''.join(chunks)
        elif buf.find(b'\r\n') != -1:
            before, sep, after = buf.partition(b"\r\n")
            chunks.append(before)
            return after, b''.join(chunks)

        if buf:
            # No terminator yet: stash this chunk and remember its last byte
            # so a terminator split across chunks can be detected.
            chunks.append(buf)
            last_char = buf[-1:]

        # Need more data: pull the next chunk from the socket.
        buf = _recv(sock, RECV_SIZE)
        if not buf:
            # The server closed the connection mid-line.
            raise MemcacheUnexpectedCloseError()
|
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L1103-L1148
|
read text file line by line
|
python
|
def readline(prev, filename=None, mode='r', trim=str.rstrip, start=1, end=sys.maxsize):
    """This pipe get filenames or file object from previous pipe and read the
    content of file. Then, send the content of file line by line to next pipe.

    The start and end parameters are used to limit the range of reading from file.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param filename: The files to be read. If None, use previous pipe input as filenames.
    :type filename: None|str|unicode|list|tuple
    :param mode: The mode to open file. default is 'r'
    :type mode: str
    :param trim: The function to trim the line before send to next pipe.
    :type trim: function object.
    :param start: if star is specified, only line number larger or equal to start will be sent.
    :type start: integer
    :param end: The last line number to read.
    :type end: integer
    :returns: generator
    """
    # Resolve the list of inputs: explicit filename(s) apply only when there
    # is no upstream pipe feeding file names in.
    if prev is None:
        if filename is None:
            raise Exception('No input available for readline.')
        elif is_str_type(filename):
            file_list = [filename, ]
        else:
            file_list = filename
    else:
        file_list = prev

    for fn in file_list:
        # Accept both already-open file objects and path names.
        if isinstance(fn, file_type):
            fd = fn
        else:
            fd = open(fn, mode)
        try:
            if start <= 1 and end == sys.maxsize:
                # Fast path: no line-range filtering requested.
                for line in fd:
                    yield trim(line)
            else:
                # Line numbers are 1-based, matching the start/end contract.
                for line_no, line in enumerate(fd, 1):
                    if line_no < start:
                        continue
                    yield trim(line)
                    if line_no >= end:
                        break
        finally:
            # Close only files this function opened itself.
            if fd != fn:
                fd.close()
|
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L490-L539
|
read text file line by line
|
python
|
def read(self):
    """Loads all lines in memory

    Joins the lines returned by readlines() into one newline-separated
    string, decoding byte lines using the encoding declared in the
    metadata (falling back to UTF-8).
    """
    lines = self.readlines()
    if self.metadata and 'encoding' in self.metadata:
        encoding = self.metadata['encoding']
    else:
        encoding = 'utf-8'
    # Use sys.version_info: lexicographic comparison of the sys.version
    # string misclassifies Python 10+. Also honor the detected encoding —
    # previously it was computed and then ignored in favor of a hard-coded
    # 'utf-8'.
    if sys.version_info[0] < 3:
        return "\n".join(unicode(line, encoding) if isinstance(line, str) else line for line in lines)
    return "\n".join(str(line, encoding) if isinstance(line, bytes) else line for line in lines)
|
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L332-L342
|
read text file line by line
|
python
|
def _read_file(self):
"""
reads the file and cleans into standard text ready for parsing
"""
self.raw = []
with open(self.fname, 'r') as f:
for line in f:
#print(line)
if line.startswith('#'):
pass # comment
elif line.strip('\n') == '':
pass # space
else:
self.raw.append(line.strip('\n'))
|
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/parse_desc.py#L46-L59
|
read text file line by line
|
python
|
def _read_file(path):
    '''
    Reads and returns the contents of a text file
    '''
    try:
        with salt.utils.files.flopen(path, 'rb') as handle:
            # Coerce each raw line to str for the caller.
            return [salt.utils.stringutils.to_str(raw)
                    for raw in handle.readlines()]
    except (OSError, IOError):
        # Unreadable/missing file: degrade to an empty result.
        return ''
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debian_ip.py#L220-L228
|
read text file line by line
|
python
|
def readlines(self):
    """Returns a list of all lines (optionally parsed) in the file."""
    if not self.grammar:
        return self.file.readlines()
    # Collect via repeated readline() so only one copy of the file's
    # contents is held in memory at a time.
    collected = []
    line = self.file.readline()
    while line:
        collected.append(line)
        line = self.file.readline()
    return collected
|
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/sparser.py#L293-L306
|
read text file line by line
|
python
|
def readline(self, limit=-1, delim=b'\n'):
    """Read a single line.

    If EOF is reached before a full line can be read, a partial line is
    returned. If *limit* is specified, at most this many bytes will be read.
    """
    self._check_readable()
    chunks = []
    while True:
        # NOTE(review): assumed contract — get_chunk returns up to `limit`
        # bytes ending at (and including) the delimiter, and an empty chunk
        # when no data is available; confirm against the buffer class.
        chunk = self._buffer.get_chunk(limit, delim)
        if not chunk:
            break
        chunks.append(chunk)
        if chunk.endswith(delim):
            # Full line assembled.
            break
        if limit >= 0:
            # Shrink the remaining byte budget for the next chunk.
            limit -= len(chunk)
            if limit == 0:
                break
    if not chunks and not self._buffer.eof and self._buffer.error:
        # Nothing was read and the buffer holds a pending error: re-raise it.
        raise compat.saved_exc(self._buffer.error)
    return b''.join(chunks)
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/stream.py#L271-L292
|
read text file line by line
|
python
|
def read(*paths):
    """Read a text file.

    The path components in *paths* are joined relative to this module's
    directory; the file is decoded as UTF-8 and returned with surrounding
    whitespace stripped.
    """
    basedir = os.path.dirname(__file__)
    fullpath = os.path.join(basedir, *paths)
    # Use a context manager so the file handle is closed deterministically;
    # the previous io.open(...).read() chain leaked the handle.
    with io.open(fullpath, encoding='utf-8') as handle:
        return handle.read().strip()
|
https://github.com/datahq/dataflows/blob/2c5e5e01e09c8b44e0ff36d85b3f2f4dcf4e8465/setup.py#L12-L17
|
read text file line by line
|
python
|
def read_line(self, line):
    """
    Match a line of input according to the format specified and return a
    tuple of the resulting values
    """
    if not self._read_line_init:
        # Lazily build the regexes/conversion tables on first use.
        self.init_read_line()

    match = self._re.match(line)
    assert match is not None, f"Format mismatch (line = {line})"

    matched_values = []
    for i in range(self._re.groups):
        # Per-group conversion spec prepared by init_read_line().
        cvt_re = self._match_exps[i]
        cvt_div = self._divisors[i]
        cvt_fn = self._in_cvt_fns[i]
        match_str = match.group(i + 1)

        match0 = re.match(cvt_re, match_str)
        if match0 is not None:
            if cvt_fn == "float":
                if "." in match_str:
                    val = float(match_str)
                else:
                    # No explicit decimal point: apply the divisor
                    # (presumably an implied-decimal scale factor — confirm
                    # against init_read_line()).
                    val = int(match_str) / cvt_div
            elif cvt_fn == "int":
                val = int(match_str)
            else:
                sys.stderr.write(
                    f"Unrecognized conversion function: {cvt_fn}\n"
                )
        else:
            sys.stderr.write(
                f"Format conversion failed: {match_str}\n"
            )

        # NOTE(review): on either error path above, `val` may be unbound —
        # carrying over the previous group's value, or raising
        # UnboundLocalError on the first group. Confirm intended handling.
        matched_values.append(val)

    return tuple(matched_values)
|
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/format.py#L99-L138
|
read text file line by line
|
python
|
def _readline(self):
    """Read exactly one line from the device

    Returns:
        None on no data
    """
    logging.info('%s: reading line', self.port)
    # Serve already-buffered lines first; the final list element is held
    # back because it may be a partial line still being received.
    if len(self._lines) > 1:
        return self._lines.pop(0)
    tail = ''
    if len(self._lines):
        tail = self._lines.pop()
    try:
        tail += self._read()
    except socket.error:
        # No data available right now; brief back-off before re-splitting.
        logging.exception('%s: No new data', self.port)
        time.sleep(0.1)
    self._lines += LINESEPX.split(tail)
    # Falls through (returning None) when only a partial line is buffered.
    if len(self._lines) > 1:
        return self._lines.pop(0)
|
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L129-L151
|
read text file line by line
|
python
|
def readline(self, size=None):
    """Read a single line from rfile buffer and return it.

    Args:
        size (int): minimum amount of data to read

    Returns:
        bytes: One line from rfile.
    """
    if size is not None:
        chunk = self.rfile.readline(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    # User didn't specify a size ...
    # We read the line in chunks to make sure it's not a 100MB line !
    pieces = []
    while True:
        chunk = self.rfile.readline(256)
        self.bytes_read += len(chunk)
        self._check_length()
        pieces.append(chunk)
        # See https://github.com/cherrypy/cherrypy/issues/421
        if len(chunk) < 256 or chunk[-1:] == LF:
            return EMPTY.join(pieces)
|
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L282-L308
|
read text file line by line
|
python
|
def readline(self):
    """
    Return the next buffered line, or '' once every line has been served.
    """
    self.line_number += 1
    exhausted = self.line_number > len(self.lines)
    return '' if exhausted else self.lines[self.line_number - 1]
|
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/tools/pep8.py#L501-L508
|
read text file line by line
|
python
|
def readlines(self, size=None):
    """Reads a file into a list of strings. It calls :meth:`readline`
    until the file is read to the end. It does support the optional
    `size` argument if the underlaying stream supports it for
    `readline`.
    """
    last_pos = self._pos
    result = []
    if size is not None:
        # Stop at `size` bytes past the starting position, never past the
        # stream's overall limit.
        end = min(self.limit, last_pos + size)
    else:
        end = self.limit
    while 1:
        if size is not None:
            # NOTE(review): at this point last_pos always equals self._pos
            # (it is resynchronized after each readline below), so this
            # subtraction looks like a no-op — confirm before relying on it.
            size -= last_pos - self._pos
        if self._pos >= end:
            break
        result.append(self.readline(size))
        if size is not None:
            last_pos = self._pos
    return result
|
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_wsgi.py#L136-L156
|
read text file line by line
|
python
|
def _read_plain(self, lines):
"""
Read text fragments from a plain format text file.
:param list lines: the lines of the plain text file
:param dict parameters: additional parameters for parsing
(e.g., class/id regex strings)
:raises: ValueError: if the id regex is not valid
"""
self.log(u"Parsing fragments from plain text format")
id_format = self._get_id_format()
lines = [line.strip() for line in lines]
pairs = []
i = 1
for line in lines:
identifier = id_format % i
text = line.strip()
pairs.append((identifier, [text]))
i += 1
self._create_text_fragments(pairs)
|
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/textfile.py#L898-L917
|
read text file line by line
|
python
|
def readlines(self, encoding=None):
    """Read the file and return its contents as a list of lines.

    Args:
        encoding: Text encoding to decode with; falls back to the
            module-level ENCODING default when not given.

    Returns:
        list: Lines read from ``self.path``, or ``[]`` when the file
        cannot be read or decoded (best-effort, preserving the original
        silent-failure contract).
    """
    try:
        encoding = encoding or ENCODING
        # Bug fix: the resolved encoding was computed but then ignored —
        # codecs.open() was called with encoding=None, so the default
        # never took effect.
        with codecs.open(self.path, encoding=encoding) as fi:
            return fi.readlines()
    except (IOError, OSError, ValueError, UnicodeError, LookupError):
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer swallowed.
        return []
|
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/filesys.py#L139-L146
|
read text file line by line
|
python
|
def readline(self, size=None):
    """
    Read one line (including its trailing newline) from the stream.

    At most `size` bytes are returned when given; reads never run past
    the remaining `self.length` budget. The last line read may lack a
    trailing newline if the underlying stream ends without one.
    """
    remaining = self.length - self._pos
    if remaining <= 0:
        return ''
    limit = min(size, remaining) if size else remaining
    line = self.stream.readline(limit)
    self._pos += len(line)
    return line
|
https://github.com/dcrosta/sendlib/blob/51ea5412a70cf83a62d51d5c515c0eeac725aea0/sendlib.py#L338-L356
|
read text file line by line
|
python
|
def readline(self, timeout):
    """
    Read a line from the socket. We assume no data is pending after the
    line, so it's okay to attempt large reads.

    Bytes received past the newline are kept in ``self.__remainder`` for
    the next call; a trailing CR from a CRLF pair is stripped.
    """
    buf = self.__remainder
    while not linefeed_byte in buf:
        # Block (up to `timeout`) for more data until a LF appears.
        buf += self._read_timeout(timeout)
    n = buf.index(linefeed_byte)
    self.__remainder = buf[n + 1:]
    buf = buf[:n]
    if (len(buf) > 0) and (buf[-1] == cr_byte_value):
        # Drop the CR of a CRLF line ending.
        buf = buf[:-1]
    return u(buf)
|
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/packet.py#L264-L277
|
read text file line by line
|
python
|
def readlines(self, offset=0):
    """Open the file for reading and yield lines as they are added.

    Generator: when ``self._full_read`` is set, first yields the existing
    lines from ``offset`` onward; then, when ``self._tail`` is set, polls
    the file forever, yielding each newly appended line and sleeping
    ``self._sleep`` seconds between polls.
    """
    try:
        with open(self._filepath) as fp:
            # For full read go through existing lines in file
            if self._full_read:
                fp.seek(offset)
                for row in fp:
                    yield row
            # Okay now dynamically tail the file
            if self._tail:
                while True:
                    current = fp.tell()
                    row = fp.readline()
                    if row:
                        yield row
                    else:
                        # Nothing new yet: rewind to where this attempt
                        # started so a partial write is re-read whole.
                        fp.seek(current)
                        time.sleep(self._sleep)
    except IOError as err:
        print('Error reading the file {0}: {1}'.format(self._filepath, err))
        return
|
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/file_tailer.py#L27-L50
|
read text file line by line
|
python
|
def read_lines(in_file):
    """Returns a list of lines from a input markdown file."""
    with open(in_file, 'r') as handle:
        contents = handle.read()
    return contents.split('\n')
|
https://github.com/rasbt/markdown-toclify/blob/517cde672bebda5371130b87c1fcb7184d141a02/markdown_toclify/markdown_toclify.py#L33-L38
|
read text file line by line
|
python
|
def _readline(self):
    """Read a line from the server. Data is read from the socket until a character ``\\n`` is found

    :return: the read line
    :rtype: string

    .. note:: Python 2 code (uses the ``unicode`` builtin). The socket is
       polled with a 0.5s select timeout so ``self._stop`` can interrupt
       the read; bytes are received one at a time and decoded with
       ``self.encoding``. On stop, whatever was accumulated is returned.
    """
    line = ''
    while 1:
        readable, _, __ = select.select([self.sock], [], [], 0.5)
        if self._stop:
            # Abort requested: return the partial line gathered so far.
            break
        if not readable:
            continue
        data = readable[0].recv(1)
        if data == '\n':
            break
        line += unicode(data, self.encoding)
    return line
|
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L156-L174
|
read text file line by line
|
python
|
def read(self, log_file):
    """Return the full text of *log_file*, or '' when it is unreadable.

    Args:
        log_file: path of the .log file to read.

    Returns:
        str: the file contents, or '' if the file (or its parent
        directory) does not exist.

    NOTE(review): the isdir(dirname(...)) guard means a bare relative
    filename (dirname == '') always reads as '' — kept for backward
    compatibility, but confirm it is intentional.
    """
    if os.path.isdir(os.path.dirname(log_file)) and os.path.isfile(log_file):
        # `with` closes the handle deterministically; read() is equivalent
        # to the original ''.join(readlines()) without the extra list.
        with open(log_file, 'r') as handle:
            return handle.read()
    return ''
|
https://github.com/Clivern/PyLogging/blob/46a1442ec63796302ec7fe3d49bd06a0f7a2fe70/pylogging/storage.py#L22-L30
|
read text file line by line
|
python
|
def readline(self):
    """Return a line content (with a trailing newline) if there are content. Return '' otherwise."""
    # Consuming a single item from the iterator replaces the original
    # try/next/except StopIteration dance.
    for content in self.iterator:
        return content + os.linesep
    return ""
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/file_read_backwards.py#L59-L66
|
read text file line by line
|
python
|
def readline(self):
    """Return the next buffered line, or '' when all lines were served.

    Side effect: the first time a line starts with whitespace, that
    character is remembered as the file's indent character.
    """
    index = self.line_number
    if index >= self.total_lines:
        return ''
    self.line_number = index + 1
    current = self.lines[index]
    if self.indent_char is None and current[:1] in WHITESPACE:
        self.indent_char = current[0]
    return current
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1575-L1583
|
read text file line by line
|
python
|
def readline(self, size=None):
    """
    Read one entire line from the file.  A trailing newline character is
    kept in the string (but may be absent when a file ends with an
    incomplete line). If the size argument is present and non-negative, it
    is a maximum byte count (including the trailing newline) and an
    incomplete line may be returned. An empty string is returned only when
    EOF is encountered immediately.

    @note: Unlike stdio's C{fgets()}, the returned string contains null
        characters (C{'\\0'}) if they occurred in the input.

    @param size: maximum length of returned string.
    @type size: int

    @return: next line of the file, or an empty string if the end of the
        file has been reached.
    @rtype: str
    """
    # it's almost silly how complex this function is.
    if self._closed:
        raise IOError('File is closed')
    if not (self._flags & self.FLAG_READ):
        raise IOError('File not open for reading')
    line = self._rbuffer
    while True:
        if self._at_trailing_cr and (self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0):
            # edge case: the newline may be '\r\n' and we may have read
            # only the first '\r' last time.
            if line[0] == '\n':
                line = line[1:]
                self._record_newline('\r\n')
            else:
                self._record_newline('\r')
            self._at_trailing_cr = False
        # check size before looking for a linefeed, in case we already have
        # enough.
        if (size is not None) and (size >= 0):
            if len(line) >= size:
                # truncate line and return
                self._rbuffer = line[size:]
                line = line[:size]
                self._pos += len(line)
                return line
            n = size - len(line)
        else:
            n = self._bufsize
        if ('\n' in line) or ((self._flags & self.FLAG_UNIVERSAL_NEWLINE) and ('\r' in line)):
            break
        try:
            new_data = self._read(n)
        except EOFError:
            new_data = None
        if (new_data is None) or (len(new_data) == 0):
            # EOF with no terminator: hand back whatever is buffered.
            self._rbuffer = ''
            self._pos += len(line)
            return line
        line += new_data
        self._realpos += len(new_data)
    # find the newline
    pos = line.find('\n')
    if self._flags & self.FLAG_UNIVERSAL_NEWLINE:
        rpos = line.find('\r')
        if (rpos >= 0) and ((rpos < pos) or (pos < 0)):
            pos = rpos
    xpos = pos + 1
    if (line[pos] == '\r') and (xpos < len(line)) and (line[xpos] == '\n'):
        # consume the LF of a CRLF pair as part of this line's terminator.
        xpos += 1
    self._rbuffer = line[xpos:]
    lf = line[pos:xpos]
    # the returned line is normalized to end with '\n' whatever was read.
    line = line[:pos] + '\n'
    if (len(self._rbuffer) == 0) and (lf == '\r'):
        # we could read the line up to a '\r' and there could still be a
        # '\n' following that we read next time. note that and eat it.
        self._at_trailing_cr = True
    else:
        self._record_newline(lf)
    self._pos += len(line)
    return line
|
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/file.py#L165-L242
|
read text file line by line
|
python
|
def _readline(self, prompt='', callback=None):
    """ Reads one line of input from the user.

    Parameters
    ----------
    prompt : str, optional
        The prompt to print before reading the line.
    callback : callable, optional
        A callback to execute with the read line. If not specified, input is
        read *synchronously* and this method does not return until it has
        been read.

    Returns
    -------
    If a callback is specified, returns nothing. Otherwise, returns the
    input string with the trailing newline stripped.
    """
    if self._reading:
        raise RuntimeError('Cannot read a line. Widget is already reading.')
    if not callback and not self.isVisible():
        # If the user cannot see the widget, this function cannot return.
        raise RuntimeError('Cannot synchronously read a line if the widget '
                           'is not visible!')
    self._reading = True
    self._show_prompt(prompt, newline=False)
    if callback is None:
        # Synchronous mode: spin the Qt event loop until reading completes.
        self._reading_callback = None
        while self._reading:
            QtCore.QCoreApplication.processEvents()
        return self._get_input_buffer(force=True).rstrip('\n')
    else:
        # Asynchronous mode: invoke `callback` with the stripped line once
        # input has been read.
        self._reading_callback = lambda: \
            callback(self._get_input_buffer(force=True).rstrip('\n'))
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L1770-L1807
|
read text file line by line
|
python
|
def readline(self, size=-1):
    """Read one entire line from the file. If size is present
    and non-negative, return a string with at most that
    size, which may be an incomplete line.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    end = self.buffer.find(b"\n") + 1
    if end == 0:
        # No complete line buffered yet: pull blocks until a newline
        # arrives or the stream runs dry.
        while True:
            block = self.fileobj.read(self.blocksize)
            self.buffer += block
            if not block or b"\n" in block:
                end = self.buffer.find(b"\n") + 1
                if end == 0:
                    # Still no newline: the whole buffer is the last line.
                    end = len(self.buffer)
                break
    if size != -1:
        end = min(size, end)
    line = self.buffer[:end]
    self.buffer = self.buffer[end:]
    self.position += len(line)
    return line
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L837-L864
|
read text file line by line
|
python
|
def readline(self):
    """Get the next line including the newline or '' on EOF."""
    self.lineno += 1
    # Serve any pushed-back line before touching the underlying stream.
    return self._buffer.pop() if self._buffer else self.input.readline()
|
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/parser.py#L196-L202
|
read text file line by line
|
python
|
def readlines(self, hint=-1):
    """Read lines until EOF, and return them as a list.

    If *hint* is specified, then stop reading lines as soon as the total
    size of all lines exceeds *hint*.
    """
    self._check_readable()
    lines = []
    chunks = []
    bytes_read = 0
    while True:
        # NOTE(review): get_chunk(-1, b'\n') appears to return buffered
        # data up to (and including) a newline delimiter, or a partial
        # chunk at the buffer boundary — confirm against StreamBuffer.
        chunk = self._buffer.get_chunk(-1, b'\n')
        if not chunk:
            break
        chunks.append(chunk)
        if chunk.endswith(b'\n'):
            # A full line is assembled from the accumulated partial chunks.
            lines.append(b''.join(chunks))
            del chunks[:]
            bytes_read += len(lines[-1])
        if hint >= 0 and bytes_read > hint:
            break
    if chunks:
        # Trailing data without a newline still forms the final line.
        lines.append(b''.join(chunks))
    if not lines and not self._buffer.eof and self._buffer.error:
        # Nothing was read because the stream failed: surface that error.
        raise compat.saved_exc(self._buffer.error)
    return lines
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/stream.py#L295-L320
|
read text file line by line
|
python
|
def readlines(self, sizehint=-1):
    """
    readlines([size]) -> list of strings, each a line from the file.

    Call readline() repeatedly and return a list of the lines so read.
    The optional size argument, if given, is an approximate bound on the
    total number of bytes in the lines returned.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    collected = []
    budget = sizehint
    while True:
        chunk = self.readline(budget)
        if not chunk:
            return collected
        collected.append(chunk)
        if budget >= 0:
            budget -= len(chunk)
            if budget <= 0:
                return collected
|
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/filelikeiter.py#L117-L137
|
read text file line by line
|
python
|
def readlines(self, sizehint=None):
    """Reads until EOF using :meth:`readline()`.

    Delegates to the wrapped object's own ``readlines`` when it has one
    (forwarding *sizehint*); otherwise falls back to calling
    ``readline`` until it returns an empty value.

    :param sizehint: if it's present, instead of reading up to EOF,
                     whole lines totalling approximately ``sizehint``
                     bytes (or more to accommodate a final whole line)
    :type sizehint: :class:`numbers.Integral`
    :returns: a list containing the lines read
    :rtype: :class:`~typing.List`\ [:class:`bytes`]
    """
    stream = self.wrapped
    delegate = getattr(stream, 'readlines', None)
    if delegate is None:
        collected = []
        while True:
            piece = stream.readline()
            if not piece:
                return collected
            collected.append(piece)
    return delegate() if sizehint is None else delegate(sizehint)
|
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/file.py#L74-L97
|
read text file line by line
|
python
|
def readline(self):
    """
    Read until a new-line character is encountered

    Accumulates chunks from next_chunk() until one contains '\\n'; any
    text after the newline is pushed back via unshift() for later reads.
    At EOF (StopIteration) whatever was gathered is returned, possibly
    without a trailing newline.
    """
    line = ""
    n_pos = -1
    try:
        while n_pos < 0:
            line += self.next_chunk()
            n_pos = line.find('\n')
    except StopIteration:
        # Source exhausted: fall through (n_pos may still be -1).
        pass
    if n_pos >= 0:
        # Keep only through the newline; return the surplus to the source.
        line, extra = line[:n_pos+1], line[n_pos+1:]
        self.unshift(extra)
    return line
|
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L110-L126
|
read text file line by line
|
python
|
def readline(self):
    """Read one line from the pseudoterminal, and return it as unicode.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.

    NOTE(review): the value is compared against ``b''`` below, so this
    actually yields whatever ``self.fileobj.readline()`` produces (bytes)
    — the "unicode" wording above looks stale; confirm against callers.
    """
    try:
        s = self.fileobj.readline()
    except (OSError, IOError) as err:
        if err.args[0] == errno.EIO:
            # Linux-style EOF
            self.flag_eof = True
            raise EOFError('End Of File (EOF). Exception style platform.')
        raise
    if s == b'':
        # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Empty string style platform.')
    return s
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/ptyprocess/ptyprocess.py#L530-L549
|
read text file line by line
|
python
|
def readline(self, timeout):
    """
    Read a line from the socket. We assume no data is pending after the
    line, so it's okay to attempt large reads.

    Characters after the newline are saved in ``self.__remainder`` for
    the next call; a trailing '\\r' (CRLF line ending) is stripped.
    """
    buf = self.__remainder
    while not '\n' in buf:
        # Keep reading (with timeout) until a newline shows up.
        buf += self._read_timeout(timeout)
    n = buf.index('\n')
    self.__remainder = buf[n+1:]
    buf = buf[:n]
    if (len(buf) > 0) and (buf[-1] == '\r'):
        buf = buf[:-1]
    return buf
|
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/packet.py#L271-L284
|
read text file line by line
|
python
|
def readline(self, size=None):
    """Read a single line from rfile buffer and return it.

    Args:
        size (int): maximum amount of data to read (0 returns nothing;
            None/falsy means unbounded)

    Returns:
        bytes: One line from rfile, including its LF when one was found
        within the size budget; a truncated line otherwise.
    """
    data = EMPTY
    if size == 0:
        return data
    while True:
        if size and len(data) >= size:
            return data
        if not self.buffer:
            self._fetch()
            if not self.buffer:
                # EOF
                return data
        newline_pos = self.buffer.find(LF)
        if newline_pos == -1:
            # No newline buffered: take as much as the budget allows.
            if size:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                data += self.buffer
                self.buffer = EMPTY
        else:
            # Bug fix: the original never returned here (and excluded the
            # LF), so the loop spun forever once a newline was buffered.
            # Include the LF, cap at the size budget, and return the line.
            line_end = newline_pos + 1
            if size:
                line_end = min(size - len(data), line_end)
            data += self.buffer[:line_end]
            self.buffer = self.buffer[line_end:]
            return data
|
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L541-L582
|
read text file line by line
|
python
|
def readlines(self):
    """A generator producing (line, offset) pairs from the file.

    Over-long lines (no newline within half the buffer) are skipped with
    a warning rather than yielded. The generator ends when no newline is
    found at EOF (or immediately when the file is not open).

    Bug fix: ``raise StopIteration`` inside a generator is a RuntimeError
    on Python 3.7+ (PEP 479); plain ``return`` ends the generator.
    """
    # If the file is not open, there's nothing to return
    if not self._fh:
        return
    at_eof = False
    while True:
        # Clean the buffer sometimes.
        if self._bufoffset > (self._maxreadsize / 2):
            self._buf = self._buf[self._bufoffset:]
            self._bufoffset = 0
        # Fill up the buffer if necessary.
        if len(self._buf) < self._maxreadsize:
            at_eof = not self._read(self._maxreadsize)
        # Look for the next line.
        try:
            next_newline = self._buf.index("\n", self._bufoffset)
            line = self._buf[self._bufoffset:next_newline]
            self._bufoffset = next_newline + 1
            # Save the current file offset for yielding and advance the file offset.
            offset = self._offset
            self._offset += len(line) + 1
            if self._longline:
                # This is the remaining chunk of a long line, we're not going
                # to yield it.
                self._longline = False
            else:
                yield line, offset
        except ValueError:
            # Reached the end of the buffer without finding any newlines.
            if not at_eof:
                # Line is longer than half the buffer size: skip it.
                logger.warning("Skipping over longline at %s:%d", self._path,
                               self._offset)
                self._bufoffset = len(self._buf) - 1
                self._longline = True
            return
|
https://github.com/derpston/python-multitail2/blob/4f05311da3b18f7a8cfe2877e68e35e88c07298d/src/multitail2.py#L94-L135
|
read text file line by line
|
python
|
def read_next_line(self):
    """Read another line from the file.

    Side effects: appends a new urwid.Edit widget (tabs expanded; raw
    text stored as ``original_text`` on the widget) to ``self.lines``.
    When the file ends — or the final line lacks a trailing newline —
    ``self.file`` is set to None so no further reads happen.

    Returns the line read with its newline stripped (or the raw partial
    final read).
    """
    next_line = self.file.readline()
    if not next_line or next_line[-1:] != '\n':
        # no newline on last line of file
        self.file = None
    else:
        # trim newline characters
        next_line = next_line[:-1]
    expanded = next_line.expandtabs()
    edit = urwid.Edit("", expanded, allow_tab=True)
    edit.set_edit_pos(0)
    edit.original_text = next_line
    self.lines.append(edit)
    return next_line
|
https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L31-L50
|
read text file line by line
|
python
|
def readlines(self, n, echo=None):
    """
    Read *n* lines from channel.

    Args:
        n(int): The number of lines to read.
        echo(bool): Whether to write the read data to stdout.

    Returns:
        list of bytes: *n* lines which include new line characters.

    Raises:
        EOFError: If the channel was closed before *n* lines were read.
    """
    lines = []
    for _ in range(n):
        lines.append(self.until(b'\n', echo))
    return lines
|
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/flow.py#L451-L469
|
read text file line by line
|
python
|
def readline(self, f):
    """Return the next non-empty, non-comment line of *f*, stripped.

    Anything from the first '#' onward is discarded; blank (or
    comment-only) lines are skipped.

    Raises:
        EOFError: when *f* is exhausted before a usable line is found.
    """
    while True:
        line = f.readline()
        if len(line) == 0:
            raise EOFError
        # Bug fix: the original used `line[:line.find('#')]`, which maps a
        # missing '#' (find() == -1) to line[:-1] and silently ate the last
        # character of a final line with no trailing newline. Splitting on
        # '#' keeps such lines intact; strip() removes the newline anyway.
        line = line.split('#', 1)[0]
        line = line.strip()
        if len(line) > 0:
            return line
|
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L204-L213
|
read text file line by line
|
python
|
def filelines(fname, strip=False):
    '''Read all lines from *fname* into a list, optionally stripped.

    Args:
        fname: path of the file to read.
        strip: when True, surrounding whitespace (including the newline)
            is stripped from every line.

    Returns:
        list of str: the file's lines.
    '''
    # Idiom cleanup: drop the stray C-style semicolons; `with` still
    # guarantees the handle is closed.
    with open(fname, 'r') as handle:
        lines = handle.readlines()
    if strip:
        lines = [line.strip() for line in lines]
    return lines
|
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L69-L75
|
read text file line by line
|
python
|
def readlines(self, sizehint=None):
    """
    Read all remaining lines using `readline` and return them as a list.

    If the optional ``sizehint`` argument is present, instead of reading up
    to EOF, whole lines totalling approximately sizehint bytes (possibly
    after rounding up to an internal buffer size) are read.

    :param int sizehint: desired maximum number of bytes to read.
    :returns: list of lines read from the file.
    """
    collected = []
    total = 0
    while True:
        current = self.readline()
        if not current:
            return collected
        collected.append(current)
        total += len(current)
        if sizehint is not None and total >= sizehint:
            return collected
|
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/file.py#L336-L356
|
read text file line by line
|
python
|
def read_file(file_path):
    '''
    Return the meaningful lines of a file: newline-stripped, with
    comment lines (starting with '#') and blank lines removed.
    '''
    with open(file_path, "r") as handle:
        raw = handle.readlines()
    stripped = [entry.strip("\n") for entry in raw if not entry.startswith("#")]
    # filter empty lines
    return [entry for entry in stripped if entry]
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L739-L748
|
read text file line by line
|
python
|
def readLine(self):
    """ read a line

    Maintains its own buffer, callers of the transport should not mix
    calls to readBytes and readLine.

    ``self.buf`` is a list of byte chunks; after a line is extracted the
    bytes following its newline are kept as the single remaining chunk.
    """
    if self.buf is None:
        self.buf = []
    # Buffer may already have a line if we've received unilateral
    # response(s) from the server
    if len(self.buf) == 1 and b"\n" in self.buf[0]:
        (line, b) = self.buf[0].split(b"\n", 1)
        self.buf = [b]
        return line
    while True:
        b = self.readBytes(4096)
        if b"\n" in b:
            # Prepend everything buffered so far to the part before the LF.
            result = b"".join(self.buf)
            (line, b) = b.split(b"\n", 1)
            self.buf = [b]
            return result + line
        self.buf.append(b)
|
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman/__init__.py#L311-L333
|
read text file line by line
|
python
|
def read_text_from_file(path: str) -> str:
    """ Reads text file contents """
    with open(path) as handle:
        return handle.read()
|
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/fileutils.py#L3-L8
|
read text file line by line
|
python
|
def read_line(line):
    """Split a ``name=value`` XML line and return [name, value], stripped.

    Lines containing no '=' yield ['', ''].
    """
    if '=' not in line:
        return ['', '']
    name, _, value = line.partition('=')
    return [name.strip(), value.strip()]
|
https://github.com/refindlyllc/rets/blob/c615dfc272cff0825fd3b50863c46afc3e33916f/rets/parsers/login.py#L48-L55
|
read text file line by line
|
python
|
def _readline(self):
    """Read exactly one line from the device, nonblocking.

    Complete lines are buffered in ``self.lines``; the last entry may be
    a partial line and is held back (re-joined with newly read data)
    until its separator arrives.

    Returns:
        The next complete line, or None when no full line is available
        (implicit return).
    """
    if len(self.lines) > 1:
        return self.lines.pop(0)
    tail = ''
    if len(self.lines):
        # Possibly-incomplete trailing line: prepend it to the new data.
        tail = self.lines.pop()
    try:
        tail += self._read()
    except socket.error:
        # Nothing to read right now; back off briefly.
        logging.exception('No new data')
        time.sleep(0.1)
    self.lines += linesepx.split(tail)
    # More than one entry means at least one complete line is buffered.
    if len(self.lines) > 1:
        return self.lines.pop(0)
|
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-automation/autothreadharness/open_thread_controller.py#L149-L170
|
read text file line by line
|
python
|
def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.

    :Parameters:
     - `size` (optional): the maximum number of bytes to read
    """
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder
    if size == 0:
        return EMPTY
    received = 0
    data = StringIO()
    while received < size:
        # Pull whole GridFS chunks; scan each for a newline inside the
        # remaining size budget.
        chunk_data = self.readchunk()
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            # A newline was found: shrink `size` so the result ends there.
            size = received + pos + 1
        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break
    # Rewind the logical position by however much was over-read.
    self.__position -= received - size
    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/gridfs/grid_file.py#L571-L603
|
read text file line by line
|
python
|
def readline(self):
    """Read one line from the pseudoterminal as bytes.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.
    """
    pieces = []
    while True:
        try:
            piece = self.read(1)
        except EOFError:
            break
        pieces.append(piece)
        if piece == '\n':
            break
    return ''.join(pieces)
|
https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L196-L210
|
read text file line by line
|
python
|
def _readline(self, timeout=1):
    """
    Read line from serial port.

    :param timeout: timeout, default is 1
    :return: line stripped of whitespace and terminal escape sequences,
        or None when the read timed out with no data
    """
    line = self.port.readline(timeout=timeout)
    # strip_escape removes ANSI escape codes; a None read (timeout) is
    # passed through unchanged.
    return strip_escape(line.strip()) if line is not None else line
|
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Plugin/plugins/LocalAllocator/DutSerial.py#L388-L396
|
read text file line by line
|
python
|
def tkreadline(file=None):
    """Read a line from file while running Tk mainloop.

    If the file is not line-buffered then the Tk mainloop will stop
    running after one character is typed. The function will still work
    but Tk widgets will stop updating. This should work OK for stdin and
    other line-buffered filehandles. If file is omitted, reads from
    sys.stdin.

    The file must have a readline method. If it does not have a fileno
    method (which can happen e.g. for the status line input on the
    graphics window) then the readline method is simply called directly.
    """
    if file is None:
        file = sys.stdin
    if not hasattr(file, "readline"):
        raise TypeError("file must be a filehandle with a readline method")
    # Call tkread now...
    # BUT, if we get in here for something not GUI-related (e.g. terminal-
    # focused code in a sometimes-GUI app) then skip tkread and simply call
    # readline on the input eg. stdin. Otherwise we'd fail in _TkRead().read()
    try:
        fd = file.fileno()
    except Exception:
        # Narrowed from a bare `except:`; objects without a usable fileno()
        # simply fall back to a plain readline below.
        fd = None
    # Bug fix: `if (fd and ...)` treated fd == 0 (which is stdin's file
    # descriptor) as "no fd", so the Tk mainloop was never serviced for
    # stdin. Compare against None instead.
    if fd is not None and capable.OF_GRAPHICS:
        tkread(fd, 0)
        # if EOF was encountered on a tty, avoid reading again because
        # it actually requests more data
        if not select.select([fd], [], [], 0)[0]:
            return ''
    return file.readline()
|
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/irafutils.py#L394-L430
|
read text file line by line
|
python
|
def readline(self, prompt='', use_raw=None):
    """Read one line of input, stripping the trailing newline.

    Prompt and use_raw exist only for compatibility with other input
    routines and are ignored. EOFError will be raised on EOF.
    """
    current = self.input.readline()
    if not current:
        raise EOFError
    return current.rstrip("\n")
|
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/inout/scriptin.py#L53-L60
|
read text file line by line
|
python
|
def readline(self, size=-1):
    """Read one line delimited by '\n' from the file.

    A trailing newline character is kept in the string. It may be absent when a
    file ends with an incomplete line. If the size argument is non-negative,
    it specifies the maximum string size (counting the newline) to return.
    A negative size is the same as unspecified. Empty string is returned
    only when EOF is encountered immediately.

    Args:
      size: Maximum number of bytes to read. If not specified, readline stops
        only on '\n' or EOF.

    Returns:
      The data read as a string.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    if size == 0 or not self._remaining():
        return ''
    data_list = []
    newline_offset = self._buffer.find_newline(size)
    while newline_offset < 0:
        # Current buffer holds no newline within budget: consume all of it
        # and swap in the next prefetched buffer.
        data = self._buffer.read(size)
        size -= len(data)
        self._offset += len(data)
        data_list.append(data)
        if size == 0 or not self._remaining():
            return ''.join(data_list)
        self._buffer.reset(self._buffer_future.get_result())
        # Kick off the prefetch of the following buffer in the background.
        self._request_next_buffer()
        newline_offset = self._buffer.find_newline(size)
    data = self._buffer.read_to_offset(newline_offset + 1)
    self._offset += len(data)
    data_list.append(data)
    return ''.join(data_list)
|
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/storage_api.py#L332-L372
|
read text file line by line
|
python
|
def readline(self):
    " Reads a full line from the scanner and returns it. "
    begin = self.index
    newline_at = self.text.find('\n', begin)
    stop = len(self.text) if newline_at < 0 else newline_at + 1
    chunk = self.text[begin:stop]
    self.index = stop
    # Keep line/column tracking identical to a character-by-character scan.
    if chunk.endswith('\n'):
        self.colno = 0
        self.lineno += 1
    else:
        self.colno += stop - begin
    return chunk
|
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/strex.py#L200-L216
|
read text file line by line
|
python
|
def readlines(arg):
    """Read lines from a file into a list.

    Whitespace is stripped from each line; blank lines and lines whose
    first (stripped) character is '#' are removed.

    Args:
        arg: path of the file to read.

    Returns:
        list of str: the filtered, stripped lines.
    """
    # `with` closes the handle even when readlines() raises; the original
    # open/readlines/close sequence leaked the handle on error.
    with open(arg) as fin:
        lines_in = fin.readlines()
    lines_out = []
    for line in lines_in:
        line = line.strip()
        if not line or line[0] == '#':
            continue
        lines_out.append(line)
    return lines_out
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/utils.py#L11-L25
|
read text file line by line
|
python
|
def readline(self, size = -1):
    """This reads and returns one entire line. A trailing newline is kept
    in the string, but may be absent when a file ends with an incomplete
    line. Note: This readline() looks for a \\r\\n pair even on UNIX
    because this is what the pseudo tty device returns. So contrary to what
    you may expect you will receive the newline as \\r\\n. An empty string
    is returned when EOF is hit immediately. Currently, the size argument is
    mostly ignored, so this behavior is not standard for a file-like
    object. If size is 0 then an empty string is returned. """

    if size == 0:
        return self._empty_buffer
    # expect() returns 0 when a CRLF was matched and 1 on the delimiter
    # (EOF by default); the text consumed up to the match is in self.before.
    index = self.expect ([self._pty_newline, self.delimiter]) # delimiter default is EOF
    if index == 0:
        return self.before + self._pty_newline
    return self.before
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/pexpect/_pexpect.py#L909-L924
|
read text file line by line
|
python
|
def readlines(self, filepath):
    """Returns the list of strings extracted from the file using either
    open() if it is a local file, or by copying over the network using
    sftp over SSH."""
    # _read_check() resolves the path — for a remote (SSH) path it fetches
    # a temporary local copy and returns that copy's location.
    target = self._read_check(filepath)
    if os.path.isfile(target):
        with open(target) as f:
            lines = f.readlines()
        #If we got this file via SSH, delete it from the temp folder
        if self.is_ssh(filepath):
            os.remove(target)
    else:
        # Missing/unfetchable file: behave as empty rather than raising.
        lines = []
    return lines
|
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/tramp.py#L145-L161
|
read text file line by line
|
python
|
def readline(self, size = -1):
    """Read a line with approx. size. If size is negative,
       read a whole line.

    (Python 2 code: uses ``sys.maxint``. Lines are returned terminated
    with '\\n' regardless of the newline style found in the data, except
    possibly for a final incomplete line.)
    """
    if size < 0:
        size = sys.maxint
    elif size == 0:
        return ''
    # check for a newline already in buffer
    nl, nllen = self._checkfornewline()
    if nl >= 0:
        # the next line was already in the buffer
        nl = min(nl, size)
    else:
        # no line break in buffer - try to read more
        size -= len(self.linebuffer)
        while nl < 0 and size > 0:
            buf = self.read(min(size, 100))
            if not buf:
                break
            self.linebuffer += buf
            size -= len(buf)
            # check for a newline in buffer
            nl, nllen = self._checkfornewline()
        # we either ran out of bytes in the file, or
        # met the specified size limit without finding a newline,
        # so return current buffer
        if nl < 0:
            s = self.linebuffer
            self.linebuffer = ''
            return s
    buf = self.linebuffer[:nl]
    # lastdiscard keeps the raw newline sequence that was consumed.
    self.lastdiscard = self.linebuffer[nl:nl + nllen]
    self.linebuffer = self.linebuffer[nl + nllen:]
    # line is always returned with \n as newline char (except possibly
    # for a final incomplete line in the file, which is handled above).
    return buf + "\n"
|
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/backport/zipfile.py#L511-L553
|
read text file line by line
|
python
|
def get_lines(path, max_size=TEXT_THRESHOLD_SIZE, fatal=True, default=None):
    """
    :param str|None path: Path of text file to return lines from
    :param int|None max_size: Return contents only for files smaller than 'max_size' bytes
    :param bool|None fatal: Abort execution on failure if True
    :param list|None default: Object to return if lines couldn't be read
    :return list|None: Lines from file contents
    """
    if not path or not os.path.isfile(path) or (max_size and os.path.getsize(path) > max_size):
        # Intended for small text files, pretend no contents for binaries
        return default
    try:
        # errors="ignore" silently drops undecodable bytes instead of failing.
        with io.open(path, "rt", errors="ignore") as fh:
            return fh.readlines()
    except Exception as e:
        # NOTE(review): abort() presumably raises/exits when fatal and
        # returns `default` otherwise — confirm against runez.abort.
        return abort("Can't read %s: %s", short(path), e, fatal=(fatal, default))
|
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/file.py#L124-L141
|
read text file line by line
|
python
|
def _read_file(path):
    '''
    Reads and returns the contents of a file
    '''
    try:
        with salt.utils.files.fopen(path, 'rb') as rfh:
            contents = salt.utils.stringutils.to_unicode(rfh.read())
        result = contents.splitlines()
        # Drop the first empty line, if present (historical behavior:
        # only one blank line is removed, not all of them)
        try:
            result.remove('')
        except ValueError:
            pass
        return result
    except Exception:
        # Best-effort reader: any failure is reported as "no lines"
        return []
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L933-L946
|
read text file line by line
|
python
|
def read_lines_from_file(cls_name, filename):
  """Read lines from file, parsing out header and metadata."""
  with tf.io.gfile.GFile(filename, "rb") as f:
    # Decode each line to text and drop the trailing newline character
    raw_lines = [tf.compat.as_text(raw)[:-1] for raw in f]
  expected_header = "%s%s" % (_HEADER_PREFIX, cls_name)
  if raw_lines[0] != expected_header:
    raise ValueError("File {fname} does not seem to have been created from "
                     "{name}.save_to_file.".format(
                         fname=filename, name=cls_name))
  # Second line holds the JSON metadata after its fixed prefix
  metadata_dict = json.loads(raw_lines[1][len(_METADATA_PREFIX):])
  return raw_lines[2:], metadata_dict
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L507-L517
|
read text file line by line
|
python
|
def _read_from_file(self):
    """
    Read text fragments from file.
    """
    # ensure the input file is readable before doing anything else
    if not gf.file_can_be_read(self.file_path):
        self.log_exc(u"File '%s' cannot be read" % (self.file_path), None, True, OSError)
    if self.file_format not in TextFileFormat.ALLOWED_VALUES:
        self.log_exc(u"Text file format '%s' is not supported." % (self.file_format), None, True, ValueError)
    # slurp the whole file as a list of unicode lines
    self.log([u"Reading contents of file '%s'", self.file_path])
    with io.open(self.file_path, "r", encoding="utf-8") as text_file:
        file_lines = text_file.readlines()
    # drop any fragments parsed earlier
    self.clear()
    # dispatch to the parser matching the declared file format
    parsers = {
        TextFileFormat.MPLAIN: self._read_mplain,
        TextFileFormat.MUNPARSED: self._read_munparsed,
        TextFileFormat.PARSED: self._read_parsed,
        TextFileFormat.PLAIN: self._read_plain,
        TextFileFormat.SUBTITLES: self._read_subtitles,
        TextFileFormat.UNPARSED: self._read_unparsed,
    }
    parsers[self.file_format](file_lines)
    # report how many fragments were produced
    self.log([u"Parsed %d fragments", len(self.fragments)])
|
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/textfile.py#L638-L669
|
read text file line by line
|
python
|
def read(self, size=-1):
    "Reads up to size bytes, but always completes the last line."
    chunk = self.fin.read(size)
    if not chunk:
        return ''
    pieces = chunk.splitlines()
    if not chunk.endswith('\n'):
        # The read stopped mid-line: pull the remainder of that line
        # so callers never observe a split line.
        pieces[-1] += self.fin.readline()
    # process_line receives lines without their trailing newline
    return ''.join(self.process_line(piece.rstrip('\n')) for piece in pieces)
|
https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/raw/utils/stream.py#L51-L64
|
read text file line by line
|
python
|
def readlines_bytes(self):
    """Read content into byte str line iterator."""
    # The archive stays open for the whole iteration because the
    # generator body executes inside the context manager.
    with open_zipfile_archive(self.path, self.filename) as file:
        yield from (raw.rstrip(b'\r\n') for raw in file)
|
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/fileio.py#L167-L172
|
read text file line by line
|
python
|
def readline(self):
    """Readline wrapper to force readline() to return str objects."""
    # Call the underlying class's readline unbound, mirroring the
    # wrapped object's own behavior.
    raw = type(self.fd).readline(self.fd)
    return raw.decode() if isinstance(raw, bytes) else raw
|
https://github.com/radhermit/vimball/blob/3998bdb8d8c4852a388a259778f971f562f9ef37/vimball/base.py#L74-L79
|
read text file line by line
|
python
|
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
  """Reads files matching a wildcard pattern, yielding the contents.
  Args:
    filepattern: A wildcard pattern matching one or more files.
    max_lines: If set, stop reading after reading this many lines.
    split_on_newlines: A boolean. If true, then split files by lines and strip
      leading and trailing whitespace from each line. Otherwise, treat each
      file as a single string.
  Yields:
    The contents of the files as lines, if split_on_newlines is True, or
    the entire contents of each file if False.
  """
  lines_read = 0
  # Sort for a deterministic file order regardless of glob ordering
  for fname in sorted(tf.gfile.Glob(filepattern)):
    with tf.gfile.Open(fname) as f:
      if split_on_newlines:
        for line in f:
          yield line.strip()
          lines_read += 1
          if max_lines and lines_read >= max_lines:
            return
      elif max_lines:
        # Whole-file mode with a line budget: accumulate lines and emit
        # the (possibly truncated) document when the budget is exhausted
        # or the file ends.
        doc = []
        for line in f:
          doc.append(line)
          lines_read += 1
          if lines_read >= max_lines:
            yield "".join(doc)
            return
        yield "".join(doc)
      else:
        yield f.read()
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/tokenizer.py#L108-L145
|
read text file line by line
|
python
|
def _readfile(cls, filename):
    """Read a UTF-8 file and return its character tokens.

    :param filename: Name of file to be read.
    :return: Character tokens produced by ``LM.tokenize`` in 'c' mode.
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original opened/closed manually and leaked the handle on error.
    with codecs.open(filename, encoding='utf-8') as f:
        filedata = f.read()
    # Character-level tokenization ('c' mode) of the full file contents
    return LM.tokenize(filedata, mode='c')
|
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L40-L51
|
read text file line by line
|
python
|
def read_file(filename):
    """
    Read the lines of a text file into a list, and return the list.

    Blank and whitespace-only lines are skipped; surrounding whitespace
    is stripped from each kept line.

    :param filename: String - path and name of the file
    :return: List - non-empty, stripped lines within the file
    """
    with open(filename) as f:
        # Strip once per line (the original called line.strip() twice),
        # keeping only non-empty results.
        return [stripped for stripped in (line.strip() for line in f) if stripped]
|
https://github.com/NetworkEng/fping.py/blob/991507889561aa6eb9ee2ad821adf460883a9c5d/fping/fping.py#L214-L225
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.