query
stringlengths
9
60
language
stringclasses
1 value
code
stringlengths
105
25.7k
url
stringlengths
91
217
readonly array
python
def is_read_only(object):
    """
    Returns if given object is read only ( built-in or extension ).

    Works by probing: attempting to set and then delete a scratch
    attribute on the object. Built-in / extension types reject this
    with TypeError or AttributeError.

    :param object: Object.
    :type object: object
    :return: Is object read only.
    :rtype: bool
    """

    probe = "_trace__read__"
    try:
        setattr(object, probe, True)
        delattr(object, probe)
    except (TypeError, AttributeError):
        return True
    return False
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/trace.py#L91-L107
readonly array
python
def readArray(self):
    """
    Reads an array from the stream.

    AMF3 arrays carry an optional string-keyed ("mixed") section followed
    by a dense integer-indexed section.

    @warning: There is a very specific problem with AMF3 where the first
        three bytes of an encoded empty C{dict} will mirror that of an
        encoded C{{'': 1, '2': 2}}
    """
    size = self.readInteger(False)

    # Low bit clear means this is a reference into the object cache,
    # not an inline array; the remaining bits are the cache index.
    if size & REFERENCE_BIT == 0:
        return self.context.getObject(size >> 1)

    # Low bit set: inline array; remaining bits are the dense length.
    size >>= 1

    key = self.readBytes()

    if key == '':
        # integer indexes only -> python list
        result = []
        self.context.addObject(result)

        for i in xrange(size):
            result.append(self.readElement())

        return result

    # At least one string key present -> mixed (associative) array.
    result = pyamf.MixedArray()
    self.context.addObject(result)

    # Consume string-keyed entries until the empty-key terminator.
    while key:
        result[key] = self.readElement()
        key = self.readBytes()

    # Then the dense integer-indexed portion.
    for i in xrange(size):
        el = self.readElement()
        result[i] = el

    return result
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L904-L942
readonly array
python
def has_readonly(self, s):
    """Tests whether store `s` is read-only.

    A store is read-only when no transition rewrites it, i.e. every
    transition's left-hand side for `s` equals its right-hand side.
    """
    return all(
        list(transition.lhs[s]) == list(transition.rhs[s])
        for transition in self.transitions
    )
https://github.com/ND-CSE-30151/tock/blob/b8d21901aaf0e6ac913c2afa855f5b5a882a16c6/tock/machines.py#L369-L374
readonly array
python
def read(self, bounds=None, window=None, masked=False):
    """
    Performs a boundless read against the underlying array source

    Parameters
    ----------
    bounds: bounding box
        in w, s, e, n order, iterable, optional
    window: rasterio-style window, optional
        bounds OR window are required,
        specifying both or neither will raise exception
    masked: boolean
        return a masked numpy array, default: False

    Returns
    -------
    Raster object with update affine and array info

    Raises
    ------
    ValueError
        if both or neither of bounds/window are given, or if this
        Raster has neither an in-memory array nor an open source.
    """
    # Calculate the window
    if bounds and window:
        raise ValueError("Specify either bounds or window")

    if bounds:
        win = bounds_window(bounds, self.affine)
    elif window:
        win = window
    else:
        raise ValueError("Specify either bounds or window")

    # Build the affine for the sub-window: keep the pixel size/rotation
    # terms, replace the translation with the window's west/north corner.
    c, _, _, f = window_bounds(win, self.affine)  # c ~ west, f ~ north
    a, b, _, d, e, _, _, _, _ = tuple(self.affine)
    new_affine = Affine(a, b, c, d, e, f)

    nodata = self.nodata
    if nodata is None:
        nodata = -999
        warnings.warn("Setting nodata to -999; specify nodata explicitly")

    if self.array is not None:
        # It's an ndarray already
        new_array = boundless_array(
            self.array, window=win, nodata=nodata, masked=masked)
    elif self.src:
        # It's an open rasterio dataset
        new_array = self.src.read(
            self.band, window=win, boundless=True, masked=masked)
    else:
        # BUGFIX: previously fell through with `new_array` unbound,
        # raising a confusing NameError instead of a clear error.
        raise ValueError("Raster has no array and no open dataset to read from")

    return Raster(new_array, new_affine, nodata)
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L264-L311
readonly array
python
def read_array(fo, writer_schema, reader_schema=None):
    """Arrays are encoded as a series of blocks.

    Each block consists of a long count value, followed by that many
    array items.  A block with count zero indicates the end of the
    array.  Each item is encoded per the array's item schema.

    If a block's count is negative, then the count is followed
    immediately by a long block size, indicating the number of bytes in
    the block.  The actual count in this case is the absolute value of
    the count written.
    """
    # Pick an item reader up front so the per-item loop does not have to
    # re-test whether schema resolution is needed.
    if reader_schema:
        def item_reader(fo, w_schema, r_schema):
            return read_data(fo, w_schema['items'], r_schema['items'])
    else:
        def item_reader(fo, w_schema, _):
            return read_data(fo, w_schema['items'])

    read_items = []

    block_count = read_long(fo)

    while block_count != 0:
        if block_count < 0:
            block_count = -block_count
            # Read block size, unused
            read_long(fo)

        for i in xrange(block_count):
            read_items.append(item_reader(fo, writer_schema, reader_schema))
        block_count = read_long(fo)

    return read_items
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L276-L308
readonly array
python
def read_array(self, key, start=None, stop=None):
    """Read an array for the specified node (off of group).

    Parameters
    ----------
    key : str
        name of the child node under ``self.group``
    start, stop : int, optional
        slice bounds applied to the stored values
    """
    import tables
    node = getattr(self.group, key)
    attrs = node._v_attrs

    transposed = getattr(attrs, 'transposed', False)

    if isinstance(node, tables.VLArray):
        # Variable-length arrays store the whole payload in row 0.
        ret = node[0][start:stop]
    else:
        dtype = getattr(attrs, 'value_type', None)
        shape = getattr(attrs, 'shape', None)

        if shape is not None:
            # length 0 axis
            ret = np.empty(shape, dtype=dtype)
        else:
            ret = node[start:stop]

        if dtype == 'datetime64':
            # reconstruct a timezone if indicated
            ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
        elif dtype == 'timedelta64':
            ret = np.asarray(ret, dtype='m8[ns]')

    # Arrays written transposed are un-transposed on the way out.
    if transposed:
        return ret.T
    else:
        return ret
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2518-L2549
readonly array
python
def read_array(self, key, embedded=True):
    """Alias for read method that will read any type (e.g., String, KeyValue)
    and always return array.

    Args:
        key (string): The variable to read from the DB.
        embedded (boolean): Resolve embedded variables.

    Returns:
        (any): Results retrieved from DB
    """
    # Second positional argument presumably forces array results in
    # read() — confirm against read()'s signature.
    return self.read(key, True, embedded)
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L434-L445
readonly array
python
def read_be_array(fmt, count, fp):
    """
    Reads an array from a file with big-endian data.

    :param fmt: `array.array` typecode (e.g. 'H', 'I')
    :param count: number of items to read
    :param fp: binary file-like object
    :return: array in native byte order
    """
    arr = array.array(str(fmt))
    # Python 3 renamed array.fromstring -> frombytes; support both.
    if hasattr(arr, 'frombytes'):
        arr.frombytes(fp.read(count * arr.itemsize))
    else:
        arr.fromstring(fp.read(count * arr.itemsize))
    return fix_byteorder(arr)
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/utils.py#L226-L235
readonly array
python
def is_read_only(cls, db: DATABASE_SUPPORTER_FWD_REF,
                 logger: logging.Logger = None) -> bool:
    """Do we have read-only access?

    Checks both per-database and global MySQL privileges for the current
    user; returns False as soon as any write-capable privilege is found.
    Permission-denied errors while inspecting the grant tables are
    treated as acceptable (a restricted account is consistent with
    read-only access).
    """

    def convert_enums(row_):
        # All these columns are of type enum('N', 'Y');
        # https://dev.mysql.com/doc/refman/5.0/en/enum.html
        return [True if x == 'Y' else (False if x == 'N' else None)
                for x in row_]

    # 1. Check per-database privileges.
    # We don't check SELECT privileges. We're just trying to ensure
    # nothing dangerous is present - for ANY database.
    # If we get an exception
    try:
        sql = """
            SELECT db,
                /* must not have: */
                Insert_priv, Update_priv, Delete_priv,
                Create_priv, Drop_priv, Index_priv, Alter_priv,
                Lock_tables_priv, Create_view_priv,
                Create_routine_priv, Alter_routine_priv,
                Execute_priv, Event_priv, Trigger_priv
            FROM mysql.db
            WHERE
                CONCAT(user, '@', host) = CURRENT_USER()
        """
        rows = db.fetchall(sql)
        for row in rows:
            dbname = row[0]
            prohibited = convert_enums(row[1:])
            if any(prohibited):
                if logger:
                    logger.debug(
                        "MySQL.is_read_only(): FAIL: database privileges "
                        "wrong: dbname={}, prohibited={}".format(
                            dbname, prohibited
                        )
                    )
                return False
    except mysql.OperationalError:
        # Probably: error 1142, "SELECT command denied to user 'xxx'@'yyy'
        # for table 'db'". This would be OK.
        pass

    # 2. Global privileges, e.g. as held by root
    try:
        sql = """
            SELECT
                /* must not have: */
                Insert_priv, Update_priv, Delete_priv,
                Create_priv, Drop_priv,
                Reload_priv, Shutdown_priv,
                Process_priv, File_priv, Grant_priv,
                Index_priv, Alter_priv,
                Show_db_priv, Super_priv,
                Lock_tables_priv, Execute_priv,
                Repl_slave_priv, Repl_client_priv,
                Create_view_priv, Create_routine_priv,
                Alter_routine_priv, Create_user_priv,
                Event_priv, Trigger_priv,
                Create_tablespace_priv
            FROM mysql.user
            WHERE
                CONCAT(user, '@', host) = CURRENT_USER()
        """
        rows = db.fetchall(sql)
        # Expect exactly one row for the current user.
        if not rows or len(rows) > 1:
            return False
        prohibited = convert_enums(rows[0])
        if any(prohibited):
            if logger:
                logger.debug(
                    "MySQL.is_read_only(): FAIL: GLOBAL privileges "
                    "wrong: prohibited={}".format(prohibited))
            return False
    except mysql.OperationalError:
        # Probably: error 1142, "SELECT command denied to user 'xxx'@'yyy'
        # for table 'user'". This would be OK.
        pass
    return True
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L652-L734
readonly array
python
def _free_array(self, handle: int):
    """Frees the memory for the array with the given handle.

    Idempotent: freeing an already-freed handle is a no-op (the count is
    only decremented the first time).

    Args:
        handle: The handle of the array whose memory should be freed. This
            handle must come from the _create_array method.
    """
    with self._lock:
        if self._arrays[handle] is not None:
            self._arrays[handle] = None
            self._count -= 1
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/mem_manager.py#L102-L112
readonly array
python
def _safe_read(self, amt):
    """Read the number of bytes requested, compensating for partial reads.

    Normally, we have a blocking socket, but a read() can be interrupted
    by a signal (resulting in a partial read).

    Note that we cannot distinguish between EOF and an interrupt when zero
    bytes have been read. IncompleteRead() will be raised in this
    situation.

    This function should be used when <amt> bytes "should" be present for
    reading. If the bytes are truly not available (due to EOF), then the
    IncompleteRead exception can be used to detect the problem.
    """
    # Accumulate chunks and join once at the end to avoid quadratic
    # bytes concatenation.
    s = []
    while amt > 0:
        # Cap each read at MAXAMOUNT so a huge request doesn't force a
        # single giant allocation in the socket layer.
        chunk = self.fp.read(min(amt, MAXAMOUNT))
        if not chunk:
            raise IncompleteRead(bytes(b'').join(s), amt)
        s.append(chunk)
        amt -= len(chunk)
    return bytes(b"").join(s)
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/client.py#L669-L690
readonly array
python
def derive_readonly(self):
    """
    Figures out what fields should be readonly.  We iterate our
    field_config to find all that have a readonly of true
    """
    flagged = [
        name for name, config in self.field_config.items()
        if 'readonly' in config and config['readonly']
    ]
    return list(self.readonly) + flagged
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L914-L924
readonly array
python
def read(self, len=1024):
    'read up to len bytes and return them, or empty string on EOF'
    # NOTE: `len` shadows the builtin — presumably kept to mirror the
    # stdlib ssl socket signature; confirm before renaming.
    # Retries the read attempt within the socket's timeout budget.
    return self._with_retry(
        functools.partial(self._read_attempt, len), self.gettimeout())
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/ssl.py#L114-L118
readonly array
python
def readMixedArray(self): """ Read mixed array. @rtype: L{pyamf.MixedArray} """ # TODO: something with the length/strict self.stream.read_ulong() # length obj = pyamf.MixedArray() self.context.addObject(obj) attrs = self.readObjectAttributes(obj) for key in attrs.keys(): try: key = int(key) except ValueError: pass obj[key] = attrs[key] return obj
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf0.py#L223-L245
readonly array
python
def _read(self, n):
    """Read (up to) 'n' bytes from the underlying file.  If any bytes
    have been pushed in with _unread() those are returned first."""
    if n <= len(self._prefix):
        # the read can be fulfilled entirely from the prefix
        result = self._prefix[:n]
        self._prefix = self._prefix[n:]
        return result
    # otherwise we need to read some
    n -= len(self._prefix)
    result = self._prefix + self.f.read(n)
    # NOTE(review): str sentinel — assumes Python 2 / text-mode data;
    # would raise TypeError when concatenated with bytes on Python 3.
    self._prefix = ""
    return result
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L104-L116
readonly array
python
def read_only(self, value):
    """
    Setter for **self.__read_only** attribute.

    :param value: Attribute value.
    :type value: bool
    """
    if value is not None:
        # NOTE(review): assert is stripped under `python -O`; a raised
        # TypeError would be safer for validation.
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("read_only", value)
    self.__read_only = value
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/views.py#L246-L256
readonly array
python
def s3_cache_readonly(self):
    """
    Whether the Amazon S3 bucket is considered read only.

    If this is :data:`True` then the Amazon S3 bucket will only be used
    for :class:`~pip_accel.caches.s3.S3CacheBackend.get()` operations (all
    :class:`~pip_accel.caches.s3.S3CacheBackend.put()` operations will be
    disabled).

    - Environment variable: ``$PIP_ACCEL_S3_READONLY`` (refer to
      :func:`~humanfriendly.coerce_boolean()` for details on how the value
      of the environment variable is interpreted)
    - Configuration option: ``s3-readonly`` (also parsed using
      :func:`~humanfriendly.coerce_boolean()`)
    - Default: :data:`False`

    For details please refer to the :mod:`pip_accel.caches.s3` module.
    """
    # self.get() resolves the property through env var -> config file ->
    # default precedence; coerce_boolean normalizes the string result.
    return coerce_boolean(self.get(property_name='s3_cache_readonly',
                                   environment_variable='PIP_ACCEL_S3_READONLY',
                                   configuration_option='s3-readonly',
                                   default=False))
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/config.py#L427-L448
readonly array
python
def read_array(path, mmap_mode=None):
    """Read a .npy array."""
    extension = op.splitext(path)[1]
    # Guard clause: reject anything that is not a NumPy binary file.
    if extension != '.npy':
        raise NotImplementedError("The file extension `{}` ".format(extension) +
                                  "is not currently supported.")
    return np.load(path, mmap_mode=mmap_mode)
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L219-L225
readonly array
python
def p_read(p):
    """ statement : READ arguments
    """
    # Grammar action: build a block of READ sentences, one per argument.
    gl.DATA_IS_USED = True
    reads = []
    if p[2] is None:
        return
    for arg in p[2]:
        entry = arg.value
        if entry is None:
            # Upstream error already reported; propagate failure.
            p[0] = None
            return
        if isinstance(entry, symbols.VARARRAY):
            # Reading a whole array is not allowed — only elements.
            api.errmsg.syntax_error(p.lineno(1), "Cannot read '%s'. It's an array" % entry.name)
            p[0] = None
            return
        if isinstance(entry, symbols.VAR):
            if entry.class_ != CLASS.var:
                api.errmsg.syntax_error_cannot_assing_not_a_var(p.lineno(2), entry.name)
                p[0] = None
                return
            entry.accessed = True
            if entry.type_ == TYPE.auto:
                # Untyped variable: give it the default type and warn.
                entry.type_ = _TYPE(gl.DEFAULT_TYPE)
                api.errmsg.warning_implicit_type(p.lineno(2), p[2], entry.type_)
            reads.append(make_sentence('READ', entry))
            continue
        if isinstance(entry, symbols.ARRAYLOAD):
            # Array element: convert the load node into an access node.
            reads.append(make_sentence('READ',
                                       symbols.ARRAYACCESS(entry.entry, entry.args, entry.lineno)))
            continue
        api.errmsg.syntax_error(p.lineno(1), "Syntax error. Can only read a variable or an array element")
        p[0] = None
        return
    p[0] = make_block(*reads)
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1632-L1674
readonly array
python
def read(self, memory, addr, length):
    """
    Read the specified amount of bytes from the given memory
    at the given address

    :param memory: memory object to read from (must have an ``id``)
    :param addr: start address
    :param length: number of bytes to request
    :return: True if the read was started, False if a read for this
        memory id is already in flight
    """
    # Only one outstanding read per memory id is supported.
    if memory.id in self._read_requests:
        logger.warning('There is already a read operation ongoing for '
                       'memory id {}'.format(memory.id))
        return False

    rreq = _ReadRequest(memory, addr, length, self.cf)
    self._read_requests[memory.id] = rreq

    rreq.start()

    return True
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/mem.py#L955-L970
readonly array
python
def read_array(self, dtype, count=-1, out=None):
    """Return numpy array from file in native byte order.

    :param dtype: requested element type (any numpy dtype spec)
    :param count: number of items; -1 means fill the file/``out`` size
    :param out: optional preallocated destination array
    :raises ValueError: on size mismatch or short read
    """
    fh = self._fh
    dtype = numpy.dtype(dtype)

    if count < 0:
        # Derive count from the remaining file size or the output buffer.
        size = self._size if out is None else out.nbytes
        count = size // dtype.itemsize
    else:
        size = count * dtype.itemsize

    result = numpy.empty(count, dtype) if out is None else out

    if result.nbytes != size:
        raise ValueError('size mismatch')

    n = fh.readinto(result)
    if n != size:
        raise ValueError('failed to read %i bytes' % size)

    # Normalize to native byte order, swapping in place where possible.
    if not result.dtype.isnative:
        if not dtype.isnative:
            result.byteswap(True)
        result = result.newbyteorder()
    elif result.dtype.isnative != dtype.isnative:
        result.byteswap(True)

    if out is not None:
        if hasattr(out, 'flush'):
            # e.g. memory-mapped output arrays.
            out.flush()

    return result
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5598-L5629
readonly array
python
def _ReadTablesArray(self, file_object, tables_array_offset): """Reads the tables array. Args: file_object (file): file-like object. tables_array_offset (int): offset of the tables array relative to the start of the file. Returns: dict[int, KeychainDatabaseTable]: tables per identifier. Raises: ParseError: if the tables array cannot be read. """ # TODO: implement https://github.com/libyal/dtfabric/issues/12 and update # keychain_tables_array definition. data_type_map = self._GetDataTypeMap('keychain_tables_array') tables_array, _ = self._ReadStructureFromFileObject( file_object, tables_array_offset, data_type_map) tables = collections.OrderedDict() for table_offset in tables_array.table_offsets: self._ReadTable(tables, file_object, tables_array_offset + table_offset) return tables
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mac_keychain.py#L667-L693
readonly array
python
def read(self, count=0):
    """Read `count` bytes from the underlying file, or everything
    remaining when `count` is zero (the default) or negative."""
    if count > 0:
        return self.f.read(count)
    return self.f.read()
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L478-L480
readonly array
python
def read(self, n):
    """Return the next `n` items from the buffer and advance the cursor.

    array.array slices are converted to a byte string so callers always
    get string-like data back.
    """
    start = self.offset
    self.offset = start + n
    chunk = self.buf[start:start + n]
    if isinstance(chunk, array):
        return chunk.tostring()
    return chunk
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L2071-L2077
readonly array
python
def _readall(self):
    """
    Read and return all the bytes from the stream until EOF.

    Returns:
        bytes: Object content
    """
    # Translate OSS SDK exceptions into this library's error types.
    with _handle_oss_error():
        return self._bucket.get_object(key=self._key).read()
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/oss.py#L353-L361
readonly array
python
def read(self, offset):
    """
    .. _read:

    Returns the value of the memory word at ``offset``.

    Might raise WriteOnlyError_, if the device is write-only.
    Might raise AddressError_, if the offset exceeds the size of the device.
    """
    # Low bit of the mode bitmask grants read access.
    if(not self.mode & 0b01):
        raise WriteOnlyError("Device is Write-Only")
    if(offset >= self.size):
        raise AddressError("Offset({}) not in address space({})".format(offset, self.size))
    return self.repr_[offset].getvalue()
https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/core/parts.py#L228-L241
readonly array
python
def write_array(self, obj):
    """
    Writes a JavaArray

    :param obj: A JavaArray object
    """
    classdesc = obj.get_class()
    # Header: array marker, class descriptor, element count.
    self._writeStruct(">B", 1, (self.TC_ARRAY,))
    self.write_classdesc(classdesc)
    self._writeStruct(">i", 1, (len(obj),))

    # Add reference
    self.references.append(obj)
    logging.debug(
        "*** Adding ref 0x%X for array []",
        len(self.references) - 1 + self.BASE_REFERENCE_IDX,
    )

    # classdesc.name is a JVM signature, e.g. "[I"; first char must be
    # the array marker, second is the element type.
    type_char = classdesc.name[0]
    assert type_char == self.TYPE_ARRAY
    type_char = classdesc.name[1]

    if type_char == self.TYPE_OBJECT:
        for o in obj:
            self._write_value(classdesc.name[1:], o)
    elif type_char == self.TYPE_ARRAY:
        # Nested arrays recurse.
        for a in obj:
            self.write_array(a)
    else:
        log_debug("Write array of type %s" % type_char)
        for v in obj:
            log_debug("Writing: %s" % v)
            self._write_value(type_char, v)
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1561-L1593
readonly array
python
def read(self, length=None):
    """Read the given amount of bytes.

    :param length: exact number of bytes required; None reads to EOF.
    :raises BufferError: if fewer than ``length`` bytes are available.
    """
    if length is None:
        return self.reader.read()

    result = self.reader.read(length)
    if len(result) != length:
        raise BufferError(
            'No more data left to read (need {}, got {}: {}); last read {}'
            .format(length, len(result), repr(result), repr(self._last))
        )

    # Remember the last successful read for error diagnostics.
    self._last = result
    return result
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/extensions/binaryreader.py#L60-L73
readonly array
python
def read(self, n):
    """ return at most n array items, move the cursor. """
    # Refill the pool from successive files until n items are buffered.
    # NOTE: `.next()` is a Python 2 iterator call.
    while len(self.pool) < n:
        self.cur = self.files.next()
        self.pool = numpy.append(self.pool, self.fetch(self.cur), axis=0)

    rt = self.pool[:n]
    if n == len(self.pool):
        # Pool exhausted exactly: reset it via a sentinel fetch.
        self.pool = self.fetch(None)
    else:
        self.pool = self.pool[n:]
    return rt
https://github.com/rainwoodman/sharedmem/blob/b23e59c1ed0e28f7b6c96c17a04d55c700e06e3a/contrib/multipartstream.py#L31-L44
readonly array
python
def read(self, size=None):
    """Read at most size bytes from this buffer.

    Bytes read from this buffer are consumed and are permanently removed.

    Args:
      size: If provided, read no more than size bytes from the buffer.
        Otherwise, this reads the entire buffer.

    Returns:
      The bytes read from this buffer.
    """
    if size is None:
        size = self.__size
    ret_list = []
    while size > 0 and self.__buf:
        data = self.__buf.popleft()
        size -= len(data)
        ret_list.append(data)
    if size < 0:
        # Last chunk overshot: keep the tail, push the remainder back
        # onto the front of the deque for the next read.
        ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:]
        self.__buf.appendleft(remainder)
    ret = b''.join(ret_list)
    self.__size -= len(ret)
    return ret
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/compression.py#L123-L147
readonly array
python
def _on_read(self, sender, *args, **kwargs):
    """
    Internal handler for reading from the device.

    Forwards the raw data to the public on_read event, then runs the
    message-parsing pipeline.
    """
    data = kwargs.get('data', None)
    self.on_read(data=data)

    self._handle_message(data)
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L959-L966
readonly array
python
def _read(self):
    """get two list, each list contains two elements: name and nd.array value"""
    # Each index line is "<ignored>\t<data image>\t<label image>".
    _, data_img_name, label_img_name = self.f.readline().strip('\n').split("\t")
    data = {}
    label = {}
    data[self.data_name], label[self.label_name] = self._read_img(data_img_name, label_img_name)
    return list(data.items()), list(label.items())
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/data.py#L64-L70
readonly array
python
def _readall(self):
    """
    Read and return all the bytes from the stream until EOF.

    Returns:
        bytes: Object content
    """
    # Download into an in-memory stream; Azure SDK errors are translated
    # by the context manager.
    stream = _BytesIO()
    with _handle_azure_exception():
        self._get_to_stream(stream=stream, **self._client_kwargs)
    return stream.getvalue()
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/azure.py#L263-L273
readonly array
python
def emptyArray(key, add_label=None):
    """An array that starts empty"""
    result = {'key': key, 'startEmpty': True}
    if add_label is None:
        return result
    # With a label, expose an "add" button styled as success.
    result['add'] = add_label
    result['style'] = {'add': 'btn-success'}
    return result
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/schemata/defaultform.py#L165-L175
readonly array
python
def _can_read(self):
    """Wait data available for socket read

    :returns: True if data available or None if timeout or socket error
    :rtype: bool or None
    """
    if self.__sock is None:
        return None
    # select() with the configured timeout; a non-empty readable list
    # means data is waiting.
    if select.select([self.__sock], [], [], self.__timeout)[0]:
        return True
    else:
        # Timeout: record the error and drop the connection.
        self.__last_error = const.MB_TIMEOUT_ERR
        self.__debug_msg('timeout error')
        self.close()
        return None
https://github.com/sourceperl/pyModbusTCP/blob/993f6e2f5ab52eba164be049e42cea560c3751a5/pyModbusTCP/client.py#L740-L754
readonly array
python
def toarray(self):
    """
    Returns the contents as a local array.

    Will likely cause memory problems for large objects.
    """
    # Sort by key first unless the RDD is already ordered, so values
    # come back in index order before reshaping.
    rdd = self._rdd if self._ordered else self._rdd.sortByKey()
    x = rdd.values().collect()
    return asarray(x).reshape(self.shape)
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/array.py#L1006-L1014
readonly array
python
def get_readonly_fields(self, request, obj=None):
    """The model can't be changed once the export is created.

    On the add form (obj is None) nothing is read-only; on the change
    form, defer to the admin's configured readonly fields.
    """
    if obj is None:
        return []
    return super(ExportAdmin, self).get_readonly_fields(request, obj)
https://github.com/magopian/django-data-exports/blob/a73db486779d93046ad89c5bf582ff8ae869120f/data_exports/admin.py#L31-L35
readonly array
python
def _raw_read(self, where: int, size=1) -> bytes:
    """
    Selects bytes from memory. Attempts to do so faster than via read_bytes.

    :param where: address to read from
    :param size: number of bytes to read
    :return: the bytes in memory
    """
    map = self.memory.map_containing(where)
    start = map._get_offset(where)
    mapType = type(map)
    if mapType is FileMap:
        end = map._get_offset(where + size)
        if end > map._mapped_size:
            logger.warning(f"Missing {end - map._mapped_size} bytes at the end of {map._filename}")
        raw_data = map._data[map._get_offset(where): min(end, map._mapped_size)]
        if len(raw_data) < end:
            # Pad the unmapped tail with zero bytes.
            raw_data += b'\x00' * (end - len(raw_data))
        # Stitch the raw file bytes together with any overlay writes,
        # in ascending overlay-offset order.
        data = b''
        for offset in sorted(map._overlay.keys()):
            data += raw_data[len(data):offset]
            data += map._overlay[offset]
        data += raw_data[len(data):]
    elif mapType is AnonMap:
        data = bytes(map._data[start:start + size])
    else:
        # Generic fallback: byte-by-byte through the memory interface.
        data = b''.join(self.memory[where:where + size])
    assert len(data) == size, 'Raw read resulted in wrong data read which should never happen'
    return data
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/abstractcpu.py#L621-L653
readonly array
python
def read(self, n):
    '''Reads n bytes into the internal buffer'''
    # Top up the buffer if it holds fewer than n unread bytes.
    bytes_wanted = n - self.buffer_length + self.pos + 1
    if bytes_wanted > 0:
        self._buffer_bytes(bytes_wanted)

    end_pos = self.pos + n
    ret = self.buffer[self.pos + 1:end_pos + 1]
    self.pos = end_pos
    return ret
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/channel.py#L121-L130
readonly array
python
def _ensure_array(self, key, value):
    """Ensure an array field.

    Appends `value` to the array under `key`, creating the array first
    if needed, and keeps the running serialized-size estimate in sync.
    """
    if key not in self._json_dict:
        self._json_dict[key] = []
        self._size += 2  # brackets
        self._ensure_field(key)

    if len(self._json_dict[key]) > 0:
        # this array already has an entry, so add comma and space
        self._size += 2

    if isinstance(value, str):
        self._size += 2  # quotes
    self._size += len(str(value))
    self._json_dict[key].append(value)
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L164-L180
readonly array
python
def write_array(fo, datum, schema):
    """Arrays are encoded as a series of blocks.

    Each block consists of a long count value, followed by that many
    array items.  A block with count zero indicates the end of the
    array.  Each item is encoded per the array's item schema.

    If a block's count is negative, then the count is followed
    immediately by a long block size, indicating the number of bytes in
    the block.  The actual count in this case is the absolute value of
    the count written.
    """
    # This writer emits at most one data block followed by the
    # zero-count terminator block.
    if len(datum) > 0:
        write_long(fo, len(datum))
        dtype = schema['items']
        for item in datum:
            write_data(fo, item, dtype)
    write_long(fo, 0)
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L264-L280
readonly array
python
def read_zarr(store):
    """Read from a hierarchical Zarr array store.

    Parameters
    ----------
    store
        The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
    """
    if isinstance(store, Path):
        store = str(store)

    # Imported lazily so zarr stays an optional dependency.
    import zarr
    f = zarr.open(store, mode='r')
    d = {}
    for key in f.keys():
        _read_key_value_from_zarr(f, d, key)
    return AnnData(*AnnData._args_from_dict(d))
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/read.py#L369-L384
readonly array
python
def open_array(store=None, mode='a', shape=None, chunks=True, dtype=None,
               compressor='default', fill_value=0, order='C',
               synchronizer=None, filters=None, cache_metadata=True,
               cache_attrs=True, path=None, object_codec=None,
               chunk_store=None, **kwargs):
    """Open an array using file-mode-like semantics.

    Parameters
    ----------
    store : MutableMapping or string, optional
        Store or path to directory in file system or name of zip file.
    mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist); 'a' means read/write (create if doesn't
        exist); 'w' means create (overwrite if exists); 'w-' means create
        (fail if exists).
    shape : int or tuple of ints, optional
        Array shape.
    chunks : int or tuple of ints, optional
        Chunk shape. If True, will be guessed from `shape` and `dtype`. If
        False, will be set to `shape`, i.e., single chunk for the whole array.
    dtype : string or dtype, optional
        NumPy dtype.
    compressor : Codec, optional
        Primary compressor.
    fill_value : object, optional
        Default value to use for uninitialized portions of the array.
    order : {'C', 'F'}, optional
        Memory layout to be used within each chunk.
    synchronizer : object, optional
        Array synchronizer.
    filters : sequence, optional
        Sequence of filters to use to encode chunk data prior to compression.
    cache_metadata : bool, optional
        If True, array configuration metadata will be cached for the lifetime
        of the object. If False, array metadata will be reloaded prior to all
        data access and modification operations (may incur overhead depending
        on storage and data access pattern).
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store
        prior to all attribute read operations.
    path : string, optional
        Array path within store.
    object_codec : Codec, optional
        A codec to encode object arrays, only needed if dtype=object.
    chunk_store : MutableMapping or string, optional
        Store or path to directory in file system or name of zip file.

    Returns
    -------
    z : zarr.core.Array

    Examples
    --------
    >>> import numpy as np
    >>> import zarr
    >>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
    ...                      chunks=(1000, 1000), fill_value=0)
    >>> z1[:] = np.arange(100000000).reshape(10000, 10000)
    >>> z1
    <zarr.core.Array (10000, 10000) float64>
    >>> z2 = zarr.open_array('data/example.zarr', mode='r')
    >>> z2
    <zarr.core.Array (10000, 10000) float64 read-only>
    >>> np.all(z1[:] == z2[:])
    True

    Notes
    -----
    There is no need to close an array. Data are automatically flushed to the
    file system.
    """

    # use same mode semantics as h5py
    # r : read only, must exist
    # r+ : read/write, must exist
    # w : create, delete if exists
    # w- or x : create, fail if exists
    # a : read/write if exists, create otherwise (default)

    # handle polymorphic store arg
    clobber = mode == 'w'
    store = normalize_store_arg(store, clobber=clobber)
    if chunk_store is not None:
        chunk_store = normalize_store_arg(chunk_store, clobber=clobber)
    path = normalize_storage_path(path)

    # API compatibility with h5py
    compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)

    # ensure fill_value of correct type
    if fill_value is not None:
        fill_value = np.array(fill_value, dtype=dtype)[()]

    # ensure store is initialized

    if mode in ['r', 'r+']:
        # must already exist as an array (not a group)
        if contains_group(store, path=path):
            err_contains_group(path)
        elif not contains_array(store, path=path):
            err_array_not_found(path)

    elif mode == 'w':
        # unconditionally (re)create
        init_array(store, shape=shape, chunks=chunks, dtype=dtype,
                   compressor=compressor, fill_value=fill_value,
                   order=order, filters=filters, overwrite=True, path=path,
                   object_codec=object_codec, chunk_store=chunk_store)

    elif mode == 'a':
        # create only if missing
        if contains_group(store, path=path):
            err_contains_group(path)
        elif not contains_array(store, path=path):
            init_array(store, shape=shape, chunks=chunks, dtype=dtype,
                       compressor=compressor, fill_value=fill_value,
                       order=order, filters=filters, path=path,
                       object_codec=object_codec, chunk_store=chunk_store)

    elif mode in ['w-', 'x']:
        # create, failing if anything already exists at path
        if contains_group(store, path=path):
            err_contains_group(path)
        elif contains_array(store, path=path):
            err_contains_array(path)
        else:
            init_array(store, shape=shape, chunks=chunks, dtype=dtype,
                       compressor=compressor, fill_value=fill_value,
                       order=order, filters=filters, path=path,
                       object_codec=object_codec, chunk_store=chunk_store)

    # determine read only status
    read_only = mode == 'r'

    # instantiate array
    z = Array(store, read_only=read_only, synchronizer=synchronizer,
              cache_metadata=cache_metadata, cache_attrs=cache_attrs,
              path=path, chunk_store=chunk_store)

    return z
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L352-L489
readonly array
python
def read(self, n):
    '''read some bytes'''
    # Refill from the transport only when the local buffer is empty.
    if not self.buf:
        self._recv()
    if not self.buf:
        return ''
    # Never hand back more than is buffered.
    n = min(n, len(self.buf))
    chunk, self.buf = self.buf[:n], self.buf[n:]
    if self._debug >= 2:
        for b in chunk:
            self.debug("read 0x%x" % ord(b), 2)
    return chunk
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L1740-L1753
readonly array
python
def save_array(self):
    """Save the currently selected collection item to a ``.npy`` file.

    Pops up a native save-file dialog (remembering the last path used),
    fetches the value behind the current index from the delegate, and
    writes it with ``numpy.save``.  Failures are reported in a Qt
    message box rather than raised.
    """
    title = _( "Save array")
    # Default the dialog to the last-used path, or the user's cwd/home.
    if self.array_filename is None:
        self.array_filename = getcwd_or_home()
    # Suspend stdio redirection while the (blocking) native dialog runs.
    self.redirect_stdio.emit(False)
    filename, _selfilter = getsavefilename(self, title,
                                           self.array_filename,
                                           _("NumPy arrays")+" (*.npy)")
    self.redirect_stdio.emit(True)
    if filename:
        self.array_filename = filename
        data = self.delegate.get_value( self.currentIndex() )
        try:
            import numpy as np
            np.save(self.array_filename, data)
        except Exception as error:
            QMessageBox.critical(self, title,
                                 _("<b>Unable to save array</b>"
                                   "<br><br>Error message:<br>%s"
                                   ) % str(error))
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L1157-L1177
readonly array
python
def load(self, f, skip):
    """Load the array data from a file-like object.

    Reads whitespace-separated tokens line by line until the target
    array (obtained via ``self.get()``) is completely filled.  When
    *skip* is true the tokens are consumed but not stored.
    """
    array = self.get()
    convert = array.dtype.type
    total = array.size
    filled = 0
    while filled < total:
        for token in f.readline().split():
            if filled >= total:
                raise FileFormatError("Wrong array data: too many values.")
            if not skip:
                array.flat[filled] = convert(token)
            filled += 1
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/number_state.py#L139-L153
readonly array
python
def array(
        item_processor,  # type: Processor
        alias=None,  # type: Optional[Text]
        nested=None,  # type: Optional[Text]
        omit_empty=False,  # type: bool
        hooks=None  # type: Optional[Hooks]
):
    # type: (...) -> RootProcessor
    """Create a processor that parses and serializes XML array data.

    Array items may either live under a dedicated container element
    (*nested*, XPath syntax supported) or sit directly in the parent
    element (embedded form).  The array is required whenever its item
    processor is required.

    :param item_processor: declxml processor for the individual items.
    :param alias: name used for the array when read from XML; defaults
        to the item processor's name.
    :param nested: element name containing the items; omit for an
        embedded array.
    :param omit_empty: skip serializing empty *nested* arrays (outer
        arrays of arrays always serialize their empty members so no
        information is lost).
    :param hooks: optional Hooks object wrapped around the processor.
    :return: a declxml processor object.
    """
    return _processor_wrap_if_hooks(
        _Array(item_processor, alias, nested, omit_empty), hooks)
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L374-L430
readonly array
python
def make_array_access(id_, lineno, arglist):
    """ Creates an array access. A(x1, x2, ..., xn). This is an RVALUE
    (Read the element).

    :param id_: identifier of the array being accessed
    :param lineno: source line number, for diagnostics
    :param arglist: index expressions x1..xn
    """
    # NOTE: make_node takes (id_, arglist, lineno) — argument order
    # differs from this function's signature.
    return symbols.ARRAYACCESS.make_node(id_, arglist, lineno)
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L293-L297
readonly array
python
def write_array_empty(self, key, value):
    """ write a 0-len array """
    # PyTables refuses zero-length arrays, so store a 1-element-per-axis
    # placeholder and record the real dtype/shape in node attributes.
    placeholder = np.empty((1,) * value.ndim)
    self._handle.create_array(self.group, key, placeholder)
    node = getattr(self.group, key)
    node._v_attrs.value_type = str(value.dtype)
    node._v_attrs.shape = value.shape
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2702-L2709
readonly array
python
def readBytes(self):
    """
    Reads and returns a utf-8 encoded byte array.
    """
    length, is_reference = self._readLength()
    # References point back into the string table instead of the stream.
    if is_reference:
        return self.context.getString(length)
    if not length:
        return ''
    data = self.stream.read(length)
    self.context.addString(data)
    return data
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L847-L862
readonly array
python
def read(self, size_or_buffer, timeout = None):
    r"""Read data from the endpoint.

    The parameter size_or_buffer is either the number of bytes to
    read or an array object where the data will be put in and timeout is
    the time limit of the operation. The transfer type and endpoint address
    are automatically inferred.

    The method returns either an array object or the number of bytes
    actually read.

    For details, see the Device.read() method.
    """
    # Pure delegate: the Device resolves transfer type from this
    # endpoint object (passed as the first argument).
    return self.device.read(self, size_or_buffer, timeout)
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/core.py#L376-L389
readonly array
python
async def can_read(self):
    "See if there's data that can be read."
    # Lazily establish the connection on first use.
    connected = self._reader and self._writer
    if not connected:
        await self.connect()
    return self._parser.can_read()
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/connection.py#L398-L402
readonly array
python
def read(self, size=None):
    """Read `size` bytes or if size is not provided everything is read.

    :param size: the number of bytes read.
    """
    if self._pos >= self.limit:
        return self.on_exhausted()
    # -1 means "everything", mirroring file object semantics.
    if size is None or size == -1:
        size = self.limit
    remaining = self.limit - self._pos
    to_read = min(remaining, size)
    try:
        data = self._read(to_read)
    except (IOError, ValueError):
        return self.on_disconnect()
    # A short read of a non-empty request means the peer went away.
    if to_read and len(data) != to_read:
        return self.on_disconnect()
    self._pos += len(data)
    return data
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_wsgi.py#L100-L117
readonly array
python
def _read_bin(self, stream, byte_order):
    '''
    Read data from a binary stream.  Raise StopIteration if the
    property could not be read.

    :param stream: binary file-like object positioned at the value
    :param byte_order: endianness flag passed to self.dtype()
    '''
    # NOTE(review): raising StopIteration as an error signal is hazardous
    # if this is ever called from inside a generator (PEP 479 turns it
    # into RuntimeError on modern Python) — confirm callers.
    try:
        # Reads exactly one element; IndexError means the stream was short.
        return _read_array(stream, self.dtype(byte_order), 1)[0]
    except IndexError:
        raise StopIteration
https://github.com/dranjan/python-plyfile/blob/9f8e8708d3a071229cf292caae7d13264e11c88b/plyfile.py#L841-L850
readonly array
python
def readonce(self, size = None):
    """
    Read from current buffer. If current buffer is empty, returns an
    empty string. You can use `prepareRead` to read the next chunk of
    data.  This is not a coroutine method.
    """
    if self.eof:
        raise EOFError
    if self.errored:
        raise IOError('Stream is broken before EOF')
    remaining = len(self.data) - self.pos
    if size is not None and size < remaining:
        # Partial read: advance the cursor, EOF state untouched.
        start = self.pos
        self.pos = start + size
        return self.data[start:self.pos]
    # Drain the buffer entirely; latch EOF if the producer is done.
    chunk = self.data[self.pos:]
    self.pos = len(self.data)
    if self.dataeof:
        self.eof = True
    return chunk
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/stream.py#L98-L118
readonly array
python
def setReadOnly(self, state):
    """
    Sets whether or not this edit is read only, propagating the flag
    to every child editor.

    :param      state | <bool>
    """
    self._readOnly = state
    for child in self.editors():
        child.setReadOnly(state)
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xserialedit.py#L340-L349
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    # Delegate directly to the underlying pyvshadow store object.
    return self._vshadow_store.read(size)
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/vshadow_file_io.py#L78-L98
readonly array
python
def read(self, addr, size):
    '''Read access.

    :param addr: i2c slave address
    :type addr: char
    :param size: size of transfer
    :type size: int
    :returns: data byte array
    :rtype: array.array('B')
    '''
    # LSB set in the address marks an I2C read transaction.
    self.set_addr(addr | 0x01)
    self.set_size(size)
    self.start()
    # Busy-wait for the hardware to finish the transfer.
    while not self.is_ready:
        pass
    return self.get_data(size)
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/i2c.py#L110-L127
readonly array
python
def read_all(self, n, check_rekey=False):
    """
    Read as close to N bytes as possible, blocking as long as necessary.

    NOTE: this is Python 2 code (``except socket.error, e`` syntax).

    @param n: number of bytes to read
    @type n: int
    @return: the data read
    @rtype: str
    @raise EOFError: if the socket was closed before all the bytes could
        be read
    """
    out = ''
    # handle over-reading from reading the banner line
    if len(self.__remainder) > 0:
        out = self.__remainder[:n]
        self.__remainder = self.__remainder[n:]
        n -= len(out)
    if PY22:
        return self._py22_read_all(n, out)
    while n > 0:
        got_timeout = False
        try:
            x = self.__socket.recv(n)
            # recv returning nothing means the peer closed the socket.
            if len(x) == 0:
                raise EOFError()
            out += x
            n -= len(x)
        except socket.timeout:
            got_timeout = True
        except socket.error, e:
            # on Linux, sometimes instead of socket.timeout, we get
            # EAGAIN. this is a bug in recent (> 2.6.9) kernels but
            # we need to work around it.
            if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
                got_timeout = True
            elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
                # syscall interrupted; try again
                pass
            elif self.__closed:
                raise EOFError()
            else:
                raise
        if got_timeout:
            if self.__closed:
                raise EOFError()
            # A rekey may be pending; surface it before any data arrived.
            if check_rekey and (len(out) == 0) and self.__need_rekey:
                raise NeedRekeyException()
            self._check_keepalive()
    return out
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/packet.py#L191-L239
readonly array
python
def try_read(self, address, size):
    """Try to read memory content at specified address.

    If any location was not written before, it returns a tuple
    (False, None).  Otherwise, it returns (True, memory content)
    assembled little-endian from the individual bytes.
    """
    result = 0
    for offset in range(size):
        location = address + offset
        if location not in self._memory:
            return False, None
        result |= self._read_byte(location) << (8 * offset)
    return True, result
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/emulator/memory.py#L149-L166
readonly array
python
def _do_read(self, collection, version, symbol, index_range=None):
    """
    Reassemble an ndarray from its chunked segments in MongoDB.

    index_range is a 2-tuple of integers - a [from, to) range of segments
    to be read. Either from or to can be None, indicating no bound.
    """
    from_index = index_range[0] if index_range else None
    to_index = version['up_to']
    # Clamp the requested upper bound to what this version actually holds.
    if index_range and index_range[1] and index_range[1] < version['up_to']:
        to_index = index_range[1]
    # Segment-count validation only makes sense for full (unbounded) reads.
    segment_count = version.get('segment_count') if from_index is None else None

    spec = _spec_fw_pointers_aware(symbol, version, from_index, to_index)

    data = bytearray()
    # i starts at -1 so that an empty cursor yields i + 1 == 0 segments.
    i = -1
    for i, x in enumerate(sorted(collection.find(spec), key=itemgetter('segment'))):
        data.extend(decompress(x['data']) if x['compressed'] else x['data'])

    # Check that the correct number of segments has been returned
    if segment_count is not None and i + 1 != segment_count:
        raise OperationFailure("Incorrect number of segments returned for {}:{}. Expected: {}, but got {}. {}".format(
            symbol, version['version'], segment_count, i + 1, collection.database.name + '.' + collection.name))

    dtype = self._dtype(version['dtype'], version.get('dtype_metadata', {}))
    # Reinterpret the raw bytes and restore the stored shape (default 1-D).
    rtn = np.frombuffer(data, dtype=dtype).reshape(version.get('shape', (-1)))
    return rtn
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/_ndarray_store.py#L353-L378
readonly array
python
def _read_rec_with_var(
        self, colnums, rows, dtype, offsets, isvar, vstorage):
    """
    Read columns from a table into a rec array, including variable
    length columns.  This is special because, for efficiency, it
    involves reading from the main table as normal but skipping the
    columns in the array that are variable.  Then reading the variable
    length columns, with accounting for strides appropriately.

    row and column numbers should be checked before calling this function
    """
    # cfitsio column numbers are 1-based.
    colnumsp = colnums+1
    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size
    array = numpy.zeros(nrows, dtype=dtype)

    # read from the main table first
    wnotvar, = numpy.where(isvar == False)  # noqa
    if wnotvar.size > 0:
        # this will be contiguous (not true for slices)
        thesecol = colnumsp[wnotvar]
        theseoff = offsets[wnotvar]
        self._FITS.read_columns_as_rec_byoffset(self._ext+1,
                                                thesecol,
                                                theseoff,
                                                array,
                                                rows)
        # NOTE: xrange — py2/py3 compatibility shim presumably defined
        # at module level; confirm.
        for i in xrange(thesecol.size):
            name = array.dtype.names[wnotvar[i]]
            colnum = thesecol[i]-1
            # Apply per-column TSCALE/TZERO scaling in place.
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])
        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

    # now read the variable length arrays we may be able to speed this up
    # by storing directly instead of reading first into a list
    wvar, = numpy.where(isvar == True)  # noqa
    if wvar.size > 0:
        # this will be contiguous (not true for slices)
        thesecol = colnumsp[wvar]
        for i in xrange(thesecol.size):
            colnump = thesecol[i]
            name = array.dtype.names[wvar[i]]
            dlist = self._FITS.read_var_column_as_list(
                self._ext+1, colnump, rows)

            if (isinstance(dlist[0], str) or
                    (IS_PY3 and isinstance(dlist[0], bytes))):
                is_string = True
            else:
                is_string = False

            if array[name].dtype.descr[0][1][1] == 'O':
                # storing in object array
                # get references to each, no copy made
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    array[name][irow] = item
            else:
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    if is_string:
                        array[name][irow] = item
                    else:
                        # Fixed-width cell: copy only the item's length.
                        ncopy = len(item)
                        if IS_PY3:
                            ts = array[name].dtype.descr[0][1][1]
                            if ts != 'S' and ts != 'U':
                                array[name][irow][0:ncopy] = item[:]
                            else:
                                array[name][irow] = item
                        else:
                            array[name][irow][0:ncopy] = item[:]

    return array
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1102-L1188
readonly array
python
def read(self, n=-1):
    """Read and return up to n bytes.
    If the argument is omitted, None, or negative, data is read and
    returned until EOF is reached..
    """
    if n is None or n < 0:
        # Read to EOF: drain the buffer, then pull chunks until done.
        out = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        while not self._eof:
            out += self._read1(self.MAX_N)
        return out

    end = self._offset + n
    if end < len(self._readbuffer):
        # Entirely satisfied from the internal buffer.
        out = self._readbuffer[self._offset:end]
        self._offset = end
        return out

    remaining = end - len(self._readbuffer)
    out = self._readbuffer[self._offset:]
    self._readbuffer = b''
    self._offset = 0
    while remaining > 0 and not self._eof:
        chunk = self._read1(remaining)
        if remaining < len(chunk):
            # _read1 over-delivered; stash the surplus for later.
            self._readbuffer = chunk
            self._offset = remaining
            out += chunk[:remaining]
            break
        out += chunk
        remaining -= len(chunk)
    return out
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L824-L855
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    # Delegate directly to the underlying pyfsapfs file entry.
    return self._fsapfs_file_entry.read(size=size)
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/apfs_file_io.py#L74-L94
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    # Prefer the named data stream when one was opened; otherwise fall
    # back to the file entry's default data stream.
    stream = self._fsntfs_data_stream
    if stream:
        return stream.read(size=size)
    return self._fsntfs_file_entry.read(size=size)
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/ntfs_file_io.py#L82-L104
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if size is None:
        # No size: read everything up to the recorded file size.
        remaining = self._size - self._file_object.tell()
        return self._file_object.read(remaining)
    return self._file_object.read(size)
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/os_file_io.py#L106-L129
readonly array
python
def read(self, to_read, timeout_ms):
    """Reads data from this file.

    in to_read of type int
        Number of bytes to read.

    in timeout_ms of type int
        Timeout (in ms) to wait for the operation to complete.
        Pass 0 for an infinite timeout.

    return data of type str
        Array of data read.

    raises :class:`OleErrorNotimpl`
        The method is not implemented yet.
    """
    # Validate both integer arguments before dispatching the COM call.
    for name, arg in (("to_read", to_read), ("timeout_ms", timeout_ms)):
        if not isinstance(arg, baseinteger):
            raise TypeError("%s can only be an instance of type baseinteger" % name)
    return self._call("read", in_p=[to_read, timeout_ms])
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L20871-L20894
readonly array
python
def readinto(self, byte_array):
    """
    Read data into a byte array, upto the size of the byte array.

    :param byte_array: A byte array/memory view to pour bytes into.
    :type byte_array: ``bytearray`` or ``memoryview``
    """
    data = self.read(len(byte_array))
    count = len(data)
    byte_array[:count] = data
    return count
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/binding.py#L1322-L1333
readonly array
python
def do_read(self, args):
    """read <addr> <objid> <prop> [ <indx> ]

    Console command: issue a BACnet ReadProperty request and print the
    decoded value.  NOTE: this is Python 2 code (``except Exception, error``).
    """
    args = args.split()
    if _debug: ReadPropertyConsoleCmd._debug("do_read %r", args)

    try:
        addr, obj_id, prop_id = args[:3]
        obj_id = ObjectIdentifier(obj_id).value

        datatype = get_datatype(obj_id[0], prop_id)
        if not datatype:
            raise ValueError("invalid property for object type")

        # build a request
        request = ReadPropertyRequest(
            objectIdentifier=obj_id,
            propertyIdentifier=prop_id,
        )
        request.pduDestination = Address(addr)

        # optional trailing argument selects a single array element
        if len(args) == 4:
            request.propertyArrayIndex = int(args[3])
        if _debug: ReadPropertyConsoleCmd._debug(" - request: %r", request)

        # make an IOCB
        iocb = IOCB(request)
        if _debug: ReadPropertyConsoleCmd._debug(" - iocb: %r", iocb)

        # give it to the application
        deferred(this_application.request_io, iocb)

        # wait for it to complete
        iocb.wait()

        # do something for success
        if iocb.ioResponse:
            apdu = iocb.ioResponse

            # should be an ack
            if not isinstance(apdu, ReadPropertyACK):
                if _debug: ReadPropertyConsoleCmd._debug(" - not an ack")
                return

            # find the datatype
            datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
            if _debug: ReadPropertyConsoleCmd._debug(" - datatype: %r", datatype)
            if not datatype:
                raise TypeError("unknown datatype")

            # special case for array parts, others are managed by cast_out
            if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
                if apdu.propertyArrayIndex == 0:
                    # index 0 of a BACnet array is its length (Unsigned)
                    value = apdu.propertyValue.cast_out(Unsigned)
                else:
                    value = apdu.propertyValue.cast_out(datatype.subtype)
            else:
                value = apdu.propertyValue.cast_out(datatype)
            if _debug: ReadPropertyConsoleCmd._debug(" - value: %r", value)

            sys.stdout.write(str(value) + '\n')
            if hasattr(value, 'debug_contents'):
                value.debug_contents(file=sys.stdout)
            sys.stdout.flush()

        # do something for error/reject/abort
        if iocb.ioError:
            sys.stdout.write(str(iocb.ioError) + '\n')

    except Exception, error:
        ReadPropertyConsoleCmd._exception("exception: %r", error)
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/samples/ReadProperty25.py#L41-L110
readonly array
python
def write(self, offset, value):
    """
    .. _write:

    Writes the memory word at ``offset`` to ``value``.
    Might raise ReadOnlyError_, if the device is read-only.
    Might raise AddressError_, if the offset exceeds the size of the device.
    """
    # Bit 1 of the mode mask grants write access.
    if not (self.mode & 0b10):
        raise ReadOnlyError("Device is Read-Only")
    if offset >= self.size:
        raise AddressError("Offset({}) not in address space({})".format(offset, self.size))
    self.repr_[offset].setvalue(value)
https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/core/parts.py#L243-L256
readonly array
python
def single_read(self, register):
    '''
    Reads data from desired register only once.

    :param register: one of the AD7730_*_REG constants
    :returns: the register payload bytes (communication byte stripped)
    :raises ValueError: if *register* is not a readable AD7730 register
    '''
    # Communication-register byte: 0b00010 selects "single read",
    # low bits address the target register.
    comm_reg = (0b00010 << 3) + register

    # Payload width (in bytes) of each readable register.
    widths = {
        self.AD7730_STATUS_REG: 1,
        self.AD7730_DATA_REG: 3,
        self.AD7730_MODE_REG: 2,
        self.AD7730_FILTER_REG: 3,
        self.AD7730_DAC_REG: 1,
        self.AD7730_OFFSET_REG: 3,
        self.AD7730_GAIN_REG: 3,
        self.AD7730_TEST_REG: 3,
    }
    try:
        bytes_num = widths[register]
    except KeyError:
        # BUG FIX: the original if/elif chain left bytes_num undefined
        # for unknown registers, crashing later with NameError.
        raise ValueError("Unknown AD7730 register: {!r}".format(register))

    # Clock out zero padding so the device can shift the payload back.
    command = [comm_reg] + ([0x00] * bytes_num)
    spi.SPI_write(self.CS, command)
    data = spi.SPI_read(bytes_num + 1)
    # First byte echoes the communication phase; drop it.
    return data[1:]
https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/examples/I2CSPI_BRIDGEADC01.py#L160-L187
readonly array
python
def _retryable_read(self, func, read_pref, session, address=None,
                    retryable=True, exhaust=False):
    """Execute an operation with at most one consecutive retries

    Returns func()'s return value on success. On error retries the same
    command once.

    Re-raises any exception thrown by func().
    """
    # Retries are disabled inside transactions or when the client
    # option retry_reads is off.
    retryable = (retryable and
                 self.retry_reads and
                 not (session and session._in_transaction))
    last_error = None
    retrying = False

    while True:
        try:
            server = self._select_server(
                read_pref, session, address=address)
            # The selected server may not support retryable reads at all.
            if not server.description.retryable_reads_supported:
                retryable = False
            with self._slaveok_for_server(read_pref, server, session,
                                          exhaust=exhaust) as (sock_info,
                                                               slave_ok):
                if retrying and not retryable:
                    # A retry is not possible because this server does
                    # not support retryable reads, raise the last error.
                    raise last_error
                return func(session, server, sock_info, slave_ok)
        except ServerSelectionTimeoutError:
            if retrying:
                # The application may think the write was never attempted
                # if we raise ServerSelectionTimeoutError on the retry
                # attempt. Raise the original exception instead.
                raise last_error
            # A ServerSelectionTimeoutError error indicates that there may
            # be a persistent outage. Attempting to retry in this case will
            # most likely be a waste of time.
            raise
        except ConnectionFailure as exc:
            # Network error: retry exactly once if allowed.
            if not retryable or retrying:
                raise
            retrying = True
            last_error = exc
        except OperationFailure as exc:
            if not retryable or retrying:
                raise
            # Only server errors on the retryable allow-list are retried.
            if exc.code not in helpers._RETRYABLE_ERROR_CODES:
                raise
            retrying = True
            last_error = exc
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/mongo_client.py#L1408-L1458
readonly array
python
def _read(self, limit = None): """Checks the file for new data and refills the buffer if it finds any.""" # The code that used to be here was self._fh.read(limit) # However, this broke on OSX. os.read, however, works fine, but doesn't # take the None argument or have any way to specify "read to the end". # This emulates that behaviour. while True: # Check that we haven't closed this file if not self._fh: return False dataread = os.read(self._fh.fileno(), limit or 65535) if len(dataread) > 0: self._buf += dataread if limit is not None: return True else: return False
https://github.com/derpston/python-multitail2/blob/4f05311da3b18f7a8cfe2877e68e35e88c07298d/src/multitail2.py#L49-L65
readonly array
python
def _interrupt_read(self):
    """
    Read data from device.

    Performs a USB interrupt transfer using the module-level ENDPOINT,
    REQ_INT_LEN and TIMEOUT constants, logging the raw payload.
    """
    data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)
    LOGGER.debug('Read data: %r', data)
    return data
https://github.com/padelt/temper-python/blob/cbdbace7e6755b1d91a2603ab63c9cb778078f79/temperusb/temper.py#L398-L404
readonly array
python
async def _read(self, num_bytes) -> bytes: """ Reads a given number of bytes :param num_bytes: How many bytes to read :returns: incoming bytes """ while True: if self.in_waiting < num_bytes: await asyncio.sleep(self._asyncio_sleep_time) else: # Try to read bytes inbytes = self._serial_instance.read(num_bytes) # Just for safety, should never happen if not inbytes: await asyncio.sleep(self._asyncio_sleep_time) else: return inbytes
https://github.com/xvzf/asyncserial-py/blob/36b5867f44ad78ac3d96debb001e8145afef2366/asyncserial/async_serial_wrapper.py#L94-L114
readonly array
python
def without(self, *values):
    """
    Return a version of the array that does not contain the
    specified value(s).

    Works on both dict-like and list-like wrapped objects; membership
    is tested with the underscore ``indexOf`` helper so identity-equal
    values are matched.
    """
    if self._clean.isDict():
        newlist = {}
        for i, k in enumerate(self.obj):
            # use indexof to check identity
            # BUG FIX: `is -1` compared int identity (only worked via
            # CPython small-int caching); use equality instead.
            if _(values).indexOf(k) == -1:
                # BUG FIX: plain dicts have no .set(); assign the item.
                newlist[k] = self.obj[k]
    else:
        newlist = []
        for i, v in enumerate(self.obj):
            # use indexof to check identity
            if _(values).indexOf(v) == -1:
                newlist.append(v)
    return self._wrap(newlist)
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L561-L579
readonly array
python
def memory_read16(self, addr, num_halfwords, zone=None):
    """Reads memory from the target system in units of 16-bits.

    Args:
      self (JLink): the ``JLink`` instance
      addr (int): start address to read from
      num_halfwords (int): number of half words to read
      zone (str): memory zone to read from

    Returns:
      List of halfwords read from the target system.

    Raises:
      JLinkException: if memory could not be read
    """
    # Convenience wrapper around the generic reader with a fixed width.
    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L2753-L2768
readonly array
python
def do_read(self, args):
    """read <addr> ( <objid> ( <prop> [ <indx> ] )... )...

    Console command: issue a BACnet ReadPropertyMultiple request and
    print every returned value.  NOTE: this is Python 2 code
    (``except Exception, error``).
    """
    args = args.split()
    if _debug: ReadPropertyMultipleConsoleCmd._debug("do_read %r", args)

    try:
        i = 0
        addr = args[i]
        i += 1

        read_access_spec_list = []
        while i < len(args):
            obj_id = ObjectIdentifier(args[i]).value
            i += 1

            prop_reference_list = []
            while i < len(args):
                prop_id = args[i]
                # A token that is not a property name starts the next
                # object identifier.
                if prop_id not in PropertyIdentifier.enumerations:
                    break
                i += 1

                if prop_id in ('all', 'required', 'optional'):
                    pass
                else:
                    datatype = get_datatype(obj_id[0], prop_id)
                    if not datatype:
                        raise ValueError("invalid property for object type")

                # build a property reference
                prop_reference = PropertyReference(
                    propertyIdentifier=prop_id,
                )

                # check for an array index
                if (i < len(args)) and args[i].isdigit():
                    prop_reference.propertyArrayIndex = int(args[i])
                    i += 1

                # add it to the list
                prop_reference_list.append(prop_reference)

            # check for at least one property
            if not prop_reference_list:
                raise ValueError("provide at least one property")

            # build a read access specification
            read_access_spec = ReadAccessSpecification(
                objectIdentifier=obj_id,
                listOfPropertyReferences=prop_reference_list,
            )

            # add it to the list
            read_access_spec_list.append(read_access_spec)

        # check for at least one
        if not read_access_spec_list:
            raise RuntimeError("at least one read access specification required")

        # build the request
        request = ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_spec_list,
        )
        request.pduDestination = Address(addr)
        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - request: %r", request)

        # make an IOCB
        iocb = IOCB(request)
        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - iocb: %r", iocb)

        # give it to the application
        deferred(this_application.request_io, iocb)

        # wait for it to complete
        iocb.wait()

        # do something for success
        if iocb.ioResponse:
            apdu = iocb.ioResponse

            # should be an ack
            if not isinstance(apdu, ReadPropertyMultipleACK):
                if _debug: ReadPropertyMultipleConsoleCmd._debug(" - not an ack")
                return

            # loop through the results
            for result in apdu.listOfReadAccessResults:
                # here is the object identifier
                objectIdentifier = result.objectIdentifier
                if _debug: ReadPropertyMultipleConsoleCmd._debug(" - objectIdentifier: %r", objectIdentifier)

                # now come the property values per object
                for element in result.listOfResults:
                    # get the property and array index
                    propertyIdentifier = element.propertyIdentifier
                    if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyIdentifier: %r", propertyIdentifier)
                    propertyArrayIndex = element.propertyArrayIndex
                    if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyArrayIndex: %r", propertyArrayIndex)

                    # here is the read result
                    readResult = element.readResult

                    sys.stdout.write(propertyIdentifier)
                    if propertyArrayIndex is not None:
                        sys.stdout.write("[" + str(propertyArrayIndex) + "]")

                    # check for an error
                    if readResult.propertyAccessError is not None:
                        sys.stdout.write(" ! " + str(readResult.propertyAccessError) + '\n')
                    else:
                        # here is the value
                        propertyValue = readResult.propertyValue

                        # find the datatype
                        datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - datatype: %r", datatype)
                        if not datatype:
                            raise TypeError("unknown datatype")

                        # special case for array parts, others are managed by cast_out
                        if issubclass(datatype, Array) and (propertyArrayIndex is not None):
                            if propertyArrayIndex == 0:
                                # index 0 of a BACnet array is its length
                                value = propertyValue.cast_out(Unsigned)
                            else:
                                value = propertyValue.cast_out(datatype.subtype)
                        else:
                            value = propertyValue.cast_out(datatype)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - value: %r", value)

                        sys.stdout.write(" = " + str(value) + '\n')
                    sys.stdout.flush()

        # do something for error/reject/abort
        if iocb.ioError:
            sys.stdout.write(str(iocb.ioError) + '\n')

    except Exception, error:
        ReadPropertyMultipleConsoleCmd._exception("exception: %r", error)
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/samples/ReadPropertyMultiple25.py#L43-L181
readonly array
python
def read(self, limit=-1):
    """Read content. See file.read

    Clamps the request to the bytes remaining inside this sub-range of
    the parent file descriptor; -1 reads everything remaining.
    """
    remaining = self.len - self.parent_fd.tell() + self.offset
    if limit == -1 or limit > remaining:
        limit = remaining
    return self.parent_fd.read(limit)
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/subsetio.py#L48-L55
readonly array
python
def _xxhash(self):
    """
    An xxhash.b64 hash of the array.

    Returns
    ------------
    xx: int, xxhash.xxh64 hash of array.
    """
    # Recompute only when the tracked data changed or no digest has been
    # cached yet; these lookups happen millions of times so the cached
    # integer is returned on the fast path with no hashing at all.
    if self._modified_x or not hasattr(self, '_hashed_xx'):
        if self.flags['C_CONTIGUOUS']:
            source = self
        else:
            # A slice of a tracked array can be non-contiguous, e.g.
            # util.tracked_array(np.random.random(10))[::-1]; xxhash
            # needs a contiguous buffer, so copy before hashing.
            source = np.ascontiguousarray(self)
        self._hashed_xx = xxhash.xxh64(source).intdigest()
    self._modified_x = False
    return self._hashed_xx
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/caching.py#L195-L219
readonly array
python
def handle_read(self, *args, **kwargs):
    """
    See comment in AsyncNotifier.
    """
    # Pull pending events off the watch descriptor, dispatch them, then
    # give the optional user callback a chance to run.
    self.read_events()
    self.process_events()
    callback = self.handle_read_callback
    if callback is not None:
        callback(self)
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1557-L1565
readonly array
python
def read(self, size=-1):
    """Read data from the ring buffer into a new buffer.

    This advances the read index after reading; calling
    :meth:`advance_read_index` is *not* necessary.

    :param size: The number of elements to be read.
        If not specified, all available elements are read.
    :type size: int, optional
    :returns: A new buffer containing the read data.
        Its size may be less than the requested *size*.
    :rtype: buffer
    """
    # Negative size means "everything currently readable".
    count = self.read_available if size < 0 else size
    # Allocate a fresh C buffer and let readinto() fill it; readinto()
    # reports how many elements were actually transferred.
    raw = self._ffi.new('unsigned char[]', count * self.elementsize)
    count = self.readinto(raw)
    return self._ffi.buffer(raw, count * self.elementsize)
https://github.com/spatialaudio/python-pa-ringbuffer/blob/b4a5eaa9b53a437c05d196ed59e1791db159e4b0/src/pa_ringbuffer.py#L156-L175
readonly array
python
def _read_one_byte(self, fd): """Read a single byte, or raise OSError on failure.""" c = os.read(fd, 1) if not c: raise OSError return c
https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/recorder.py#L141-L146
readonly array
python
def clean_undefined(obj):
    """
    Convert Undefined array entries to None (null)
    """
    # Lists get a *new* list with Undefined entries nulled (top level
    # only); dicts are cleaned recursively *in place*.  Anything else is
    # returned untouched.
    if isinstance(obj, list):
        cleaned = []
        for item in obj:
            cleaned.append(None if isinstance(item, Undefined) else item)
        return cleaned
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = clean_undefined(obj[key])
    return obj
https://github.com/wq/html-json-forms/blob/4dfbfabeee924ba832a7a387ab3b02b6d51d9701/html_json_forms/utils.py#L270-L282
readonly array
python
def can_read(self):
    """Check if the field is readable
    """
    # Readable iff the current security manager grants View on the
    # wrapped context.
    manager = getSecurityManager()
    if manager.checkPermission(permissions.View, self.context):
        return True
    return False
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/datamanagers.py#L250-L256
readonly array
python
def save_intermediate_array(self, array, name):
    """Save intermediate array object as FITS."""
    # Skip entirely unless the recipe was configured to keep
    # intermediate results.
    if not self.intermediate_results:
        return
    fits.writeto(name, array, overwrite=True)
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L166-L169
readonly array
python
def read_cell_array(fd, endian, header):
    """Read a cell array.

    Returns an array with rows of the cell array.
    """
    rows = header['dims'][0]
    cols = header['dims'][1]
    array = [[] for _ in range(rows)]
    for row in range(rows):
        for _col in range(cols):
            # Each cell is itself a full variable: read its header,
            # decode its payload, then jump to the next cell.
            vheader, next_pos, fd_var = read_var_header(fd, endian)
            array[row].append(read_var_array(fd_var, endian, vheader))
            fd.seek(next_pos)
    # Single-row cell arrays collapse to their row before squeezing.
    if rows == 1:
        return squeeze(array[0])
    return squeeze(array)
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L286-L302
readonly array
python
def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)): """Read exactly n bytes from the socket""" recv = self._quick_recv rbuf = self._read_buffer try: while len(rbuf) < n: try: s = recv(n - len(rbuf)) except socket.error as exc: if not initial and exc.errno in _errnos: continue raise if not s: raise IOError('Socket closed') rbuf += s except: self._read_buffer = rbuf raise result, self._read_buffer = rbuf[:n], rbuf[n:] return result
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/transport.py#L270-L290
readonly array
python
def read(self, n):
    """Read n bytes.

    Returns exactly n bytes of data unless the underlying raw IO
    stream reaches EOF.
    """
    buf = self._read_buf
    start = self._read_pos
    stop = start + n
    if stop <= len(buf):
        # Fast path: the request is already fully buffered.
        self._read_pos = stop
        return self._update_pos(buf[start:stop])
    # Slow path: keep pulling chunks from the raw stream until enough
    # bytes are buffered or EOF / would-block ends the read.
    request = max(self.buffer_size, n)
    while len(buf) < stop:
        chunk = self.raw.read(request)
        if not chunk:
            break
        buf += chunk
    # Surplus bytes beyond the request are kept for the next call.
    self._read_buf = buf[stop:]
    self._read_pos = 0
    return self._update_pos(buf[start:stop])
https://github.com/planetarypy/pvl/blob/ed92b284c4208439b033d28c9c176534c0faac0e/pvl/stream.py#L45-L70
readonly array
python
def read_var_array(fd, endian, header):
    """Read variable array (of any supported type)."""
    mc = inv_mclasses[header['mclass']]
    # Numeric classes share one reader, parameterized by element type.
    if mc in numeric_class_etypes:
        etypes = set(compressed_numeric).union([numeric_class_etypes[mc]])
        return read_numeric_array(fd, endian, header, etypes)
    if mc == 'mxCHAR_CLASS':
        return read_char_array(fd, endian, header)
    if mc == 'mxCELL_CLASS':
        return read_cell_array(fd, endian, header)
    if mc == 'mxSTRUCT_CLASS':
        return read_struct_array(fd, endian, header)
    # Classes the parser deliberately rejects, with their messages.
    unsupported = {
        'mxSPARSE_CLASS': 'Sparse matrices not supported',
        'mxOBJECT_CLASS': 'Object classes not supported',
        'mxFUNCTION_CLASS': 'Function classes not supported',
        'mxOPAQUE_CLASS': 'Anonymous function classes not supported',
    }
    if mc in unsupported:
        raise ParseError(unsupported[mc])
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L354-L376
readonly array
python
def read(self, n):
    """Read n bytes."""
    # A byte-aligned read discards any partially-consumed bit state.
    self.bitcount = 0
    self.bits = 0
    return self.input.read(n)
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L71-L74
readonly array
python
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
    """
    The default uninitialized read handler. Returns symbolic bytes.
    """
    handler = self._uninitialized_read_handler
    if handler is not None:
        # A custom handler was installed; delegate entirely to it.
        return handler(self, addr, inspect=inspect, events=events)
    # No handler: synthesize an unconstrained symbolic value for this
    # address, byte-reversed for little-endian memories.
    v = self.state.solver.Unconstrained(
        "%s_%s" % (self.id, addr),
        self.width * self.state.arch.byte_width,
        key=self.variable_key_prefix + (addr,),
        inspect=inspect,
        events=events)
    return v.reversed if self.endness == "Iend_LE" else v
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/fast_memory.py#L42-L50
readonly array
python
def read(self, count):
    """
    Reads up to C{count} bytes from the file.

    :rtype: list
    :return: the list of symbolic bytes read
    """
    if self.pos > self.max_size:
        return []
    # Clamp to the bytes remaining before max_size, then advance.
    size = min(count, self.max_size - self.pos)
    start = self.pos
    self.pos = start + size
    return [self.array[i] for i in range(start, start + size)]
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L290-L302
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._range_offset < 0 or self._range_size < 0:
        raise IOError('Invalid data range.')
    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))
    # Past the end of the range: nothing left to read.
    if self._current_offset >= self._range_size:
        return b''
    # Clamp the request so it never crosses the end of the range.
    if size is None:
        size = self._range_size
    size = min(size, self._range_size - self._current_offset)
    # Reads are offset-translated into the parent file object.
    self._file_object.seek(
        self._range_offset + self._current_offset, os.SEEK_SET)
    data = self._file_object.read(size)
    self._current_offset += len(data)
    return data
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/data_range_io.py#L127-L170
readonly array
python
def _read_mz(self, mz_offset, mz_len, mz_enc_len): '''reads a mz array from the currently open ibd file''' self.ibd.seek(mz_offset) data = self.ibd.read(mz_enc_len) self.ibd.seek(0, 2) data = self.mz_compression.decompress(data) return tuple(np.fromstring(data, dtype=self.mz_dtype))
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L241-L247
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))

    # The decoded stream size is computed lazily on the first read.
    if self._decoded_stream_size is None:
        self._decoded_stream_size = self._GetDecodedStreamSize()

        if self._decoded_stream_size < 0:
            raise IOError('Invalid decoded stream size.')

    if self._current_offset >= self._decoded_stream_size:
        return b''

    # NOTE(review): _realign_offset is presumably set by a seek; the
    # decode buffer must then be re-aligned to the current offset
    # before any data is copied out — confirm against seek().
    if self._realign_offset:
        self._AlignDecodedDataOffset(self._current_offset)
        self._realign_offset = False

    if size is None:
        size = self._decoded_stream_size

    # Clamp the request so it never runs past the decoded stream end.
    if self._current_offset + size > self._decoded_stream_size:
        size = self._decoded_stream_size - self._current_offset

    decoded_data = b''

    if size == 0:
        return decoded_data

    # Drain the current decode buffer and decode further encoded chunks
    # until the request is satisfied or the stream/data runs out.
    while size > self._decoded_data_size:
        decoded_data = b''.join([
            decoded_data,
            self._decoded_data[self._decoded_data_offset:]])

        remaining_decoded_data_size = (
            self._decoded_data_size - self._decoded_data_offset)

        self._current_offset += remaining_decoded_data_size
        size -= remaining_decoded_data_size

        if self._current_offset >= self._decoded_stream_size:
            break

        # Refill the decode buffer; a zero read count means no more
        # encoded data is available.
        read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
        self._decoded_data_offset = 0
        if read_count == 0:
            break

    # Copy the final (partial) slice out of the decode buffer.
    if size > 0:
        slice_start_offset = self._decoded_data_offset
        slice_end_offset = slice_start_offset + size

        decoded_data = b''.join([
            decoded_data,
            self._decoded_data[slice_start_offset:slice_end_offset]])

        self._decoded_data_offset += size
        self._current_offset += size

    return decoded_data
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encoded_stream_io.py#L208-L286
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))

    # The uncompressed stream size is computed lazily on the first read.
    if self._uncompressed_stream_size is None:
        self._uncompressed_stream_size = self._GetUncompressedStreamSize()

        if self._uncompressed_stream_size < 0:
            raise IOError('Invalid uncompressed stream size.')

    if self._current_offset >= self._uncompressed_stream_size:
        return b''

    # NOTE(review): _realign_offset is presumably set by a seek; the
    # decompression buffer must then be re-aligned to the current
    # offset before any data is copied out — confirm against seek().
    if self._realign_offset:
        self._AlignUncompressedDataOffset(self._current_offset)
        self._realign_offset = False

    if size is None:
        size = self._uncompressed_stream_size

    # Clamp the request so it never runs past the stream end.
    if self._current_offset + size > self._uncompressed_stream_size:
        size = self._uncompressed_stream_size - self._current_offset

    uncompressed_data = b''

    if size == 0:
        return uncompressed_data

    # Drain the current decompression buffer and decompress further
    # chunks until the request is satisfied or the data runs out.
    while size > self._uncompressed_data_size:
        uncompressed_data = b''.join([
            uncompressed_data,
            self._uncompressed_data[self._uncompressed_data_offset:]])

        remaining_uncompressed_data_size = (
            self._uncompressed_data_size - self._uncompressed_data_offset)

        self._current_offset += remaining_uncompressed_data_size
        size -= remaining_uncompressed_data_size

        if self._current_offset >= self._uncompressed_stream_size:
            break

        # Refill the buffer; a zero read count means no more compressed
        # data is available.
        read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
        self._uncompressed_data_offset = 0
        if read_count == 0:
            break

    # Copy the final (partial) slice out of the decompression buffer.
    if size > 0:
        slice_start_offset = self._uncompressed_data_offset
        slice_end_offset = slice_start_offset + size

        uncompressed_data = b''.join([
            uncompressed_data,
            self._uncompressed_data[slice_start_offset:slice_end_offset]])

        self._uncompressed_data_offset += size
        self._current_offset += size

    return uncompressed_data
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/compressed_stream_io.py#L186-L264
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))
    # No backing data or offset at/past the end: nothing to read.
    if self._file_data is None or self._current_offset >= self._size:
        return b''
    # Clamp the request to the remaining data, then slice and advance.
    if size is None:
        size = self._size
    size = min(size, self._size - self._current_offset)
    start = self._current_offset
    self._current_offset = start + size
    return self._file_data[start:self._current_offset]
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/fake_file_io.py#L62-L97
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))

    # The decrypted stream size is computed lazily on the first read.
    if self._decrypted_stream_size is None:
        self._decrypted_stream_size = self._GetDecryptedStreamSize()

        if self._decrypted_stream_size < 0:
            raise IOError('Invalid decrypted stream size.')

    if self._current_offset >= self._decrypted_stream_size:
        return b''

    # NOTE(review): _realign_offset is presumably set by a seek; the
    # decryption buffer must then be re-aligned to the current offset
    # before any data is copied out — confirm against seek().
    if self._realign_offset:
        self._AlignDecryptedDataOffset(self._current_offset)
        self._realign_offset = False

    if size is None:
        size = self._decrypted_stream_size

    # Clamp the request so it never runs past the decrypted stream end.
    if self._current_offset + size > self._decrypted_stream_size:
        size = self._decrypted_stream_size - self._current_offset

    decrypted_data = b''

    if size == 0:
        return decrypted_data

    # Drain the current decryption buffer and decrypt further encrypted
    # chunks until the request is satisfied or the data runs out.
    while size > self._decrypted_data_size:
        decrypted_data = b''.join([
            decrypted_data,
            self._decrypted_data[self._decrypted_data_offset:]])

        remaining_decrypted_data_size = (
            self._decrypted_data_size - self._decrypted_data_offset)

        self._current_offset += remaining_decrypted_data_size
        size -= remaining_decrypted_data_size

        if self._current_offset >= self._decrypted_stream_size:
            break

        # Refill the buffer; a zero read count means no more encrypted
        # data is available.
        read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
        self._decrypted_data_offset = 0
        if read_count == 0:
            break

    # Copy the final (partial) slice out of the decryption buffer.
    if size > 0:
        slice_start_offset = self._decrypted_data_offset
        slice_end_offset = slice_start_offset + size

        decrypted_data = b''.join([
            decrypted_data,
            self._decrypted_data[slice_start_offset:slice_end_offset]])

        self._decrypted_data_offset += size
        self._current_offset += size

    return decrypted_data
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L222-L300
readonly array
python
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    # size is passed positionally on purpose: some file-like object
    # implementations break when it is given as a keyword argument.
    return self._file_object.read(size)
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/file_object_io.py#L87-L109