query
stringlengths
9
60
language
stringclasses
1 value
code
stringlengths
105
25.7k
url
stringlengths
91
217
parse binary file to custom class
python
def _parse_binary(v, header_d): """ Parses binary string. Note: <str> for py2 and <binary> for py3. """ # This is often a no-op, but it ocassionally converts numbers into strings v = nullify(v) if v is None: return None if six.PY2: try: return six.binary_type(v).strip() except UnicodeEncodeError: return six.text_type(v).strip() else: # py3 try: return six.binary_type(v, 'utf-8').strip() except UnicodeEncodeError: return six.text_type(v).strip()
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/types.py#L357-L382
parse binary file to custom class
python
def is_binary(path): ''' Detects if the file is a binary, returns bool. Returns True if the file is a bin, False if the file is not and None if the file is not available. ''' if not os.path.isfile(path): return False try: with fopen(path, 'rb') as fp_: try: data = fp_.read(2048) if six.PY3: data = data.decode(__salt_system_encoding__) return salt.utils.stringutils.is_binary(data) except UnicodeDecodeError: return True except os.error: return False
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/files.py#L682-L699
parse binary file to custom class
python
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Implementation of AnyParser API """ # first use the base parser to parse something compliant with the conversion chain first = self._base_parser._parse_singlefile(self._converter.from_type, file_path, encoding, logger, options) # then apply the conversion chain return self._converter.convert(desired_type, first, logger, options)
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_combining_parsers.py#L588-L598
parse binary file to custom class
python
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None): """ Load series data from flat binary files. Parameters ---------- path : string URI or local filesystem path Directory to load from, can be a URI string with scheme (e.g. 'file://', 's3n://', or 'gs://'), or a single file, or a directory, or a directory with a single wildcard character. ext : str, optional, default = 'bin' Optional file extension specifier. conf : str, optional, default = 'conf.json' Name of conf file with type and size information. dtype : dtype or dtype specifier, default 'float64' Numerical type to use for data after converting from text. shape : tuple or list, optional, default = None Shape of data if known, will be inferred otherwise. skip : int, optional, default = 0 Number of items in each record to skip. index : array, optional, default = None Index for records, if not provided will use (0, 1, ...) labels : array, optional, default = None Labels for records. If provided, should have shape of shape[:-1]. engine : object, default = None Computational engine (e.g. a SparkContext for Spark) credentials : dict, default = None Credentials for remote storage (e.g. 
S3) in the form {access: ***, secret: ***} """ shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials) from thunder.readers import normalize_scheme, get_parallel_reader path = normalize_scheme(path, ext) from numpy import dtype as dtype_func nelements = shape[-1] + skip recordsize = dtype_func(dtype).itemsize * nelements if spark and isinstance(engine, spark): lines = engine.binaryRecords(path, recordsize) raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:]) def switch(record): ary, idx = record return (idx,), ary rdd = raw.zipWithIndex().map(switch) if shape and len(shape) > 2: expand = lambda k: unravel_index(k[0], shape[0:-1]) rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1])) if not index: index = arange(shape[-1]) return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True) else: reader = get_parallel_reader(path)(engine, credentials=credentials) data = reader.read(path, ext=ext) values = [] for record in data: buf = record[1] offset = 0 while offset < len(buf): v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype) values.append(v[skip:]) offset += recordsize if not len(values) == prod(shape[0:-1]): raise ValueError('Unexpected shape, got %g records but expected %g' % (len(values), prod(shape[0:-1]))) values = asarray(values, dtype=dtype) if shape: values = values.reshape(shape) return fromarray(values, index=index, labels=labels)
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L254-L342
parse binary file to custom class
python
def parse_file(path, format=None, encoding='utf-8', force_types=True): """A convenience wrapper of parse, which accepts path of file to parse. Args: path: path to file to parse format: explicitly override the guessed `inp` markup format encoding: file encoding, defaults to utf-8 force_types: if `True`, integers, floats, booleans and none/null are recognized and returned as proper types instead of strings; if `False`, everything is converted to strings if `None`, backend return value is used Returns: parsed `inp` (dict or list) containing unicode values Raises: AnyMarkupError if a problem occurs while parsing """ try: with open(path, 'rb') as f: return parse(f, format, encoding, force_types) except EnvironmentError as e: raise AnyMarkupError(e, traceback.format_exc())
https://github.com/bkabrda/anymarkup-core/blob/299935092fc2650cca4e32ec92441786918f9bab/anymarkup_core/__init__.py#L120-L141
parse binary file to custom class
python
def convert_from_binary(self, binvalue, type, **kwargs): """ Convert binary data to type 'type'. 'type' must have a convert_binary function. If 'type' supports size checking, the size function is called to ensure that binvalue is the correct size for deserialization """ size = self.get_type_size(type) if size > 0 and len(binvalue) != size: raise ArgumentError("Could not convert type from binary since the data was not the correct size", required_size=size, actual_size=len(binvalue), type=type) typeobj = self.get_type(type) if not hasattr(typeobj, 'convert_binary'): raise ArgumentError("Type does not support conversion from binary", type=type) return typeobj.convert_binary(binvalue, **kwargs)
https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/typeinfo.py#L95-L113
parse binary file to custom class
python
def is_binary(filename): """ :param filename: File to check. :returns: True if it's a binary file, otherwise False. """ logger.debug('is_binary: %(filename)r', locals()) # Check if the file extension is in a list of known binary types binary_extensions = ['pyc', 'iso', 'zip', 'pdf'] for ext in binary_extensions: if filename.endswith(ext): return True # Check if the starting chunk is a binary string chunk = get_starting_chunk(filename) return is_binary_string(chunk)
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/external/binaryornot/check.py#L28-L43
parse binary file to custom class
python
def as_binary(s, encoding='utf-8'): """Force conversion of given string to binary type. Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x . If the string is already in binary, then no conversion is done and the same string is returned and ``encoding`` argument is ignored. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to binary. encoding: str The encoding of the resulting binary string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``bytes`` for Python3 or ``str`` for Python 2. """ if isinstance(s, six.text_type): return s.encode(encoding) elif isinstance(s, six.binary_type): # make sure the binary is in required encoding return s.decode(encoding).encode(encoding) else: raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/core.py#L171-L200
parse binary file to custom class
python
def is_binary_file(file): """ Returns if given file is a binary file. :param file: File path. :type file: unicode :return: Is file binary. :rtype: bool """ file_handle = open(file, "rb") try: chunk_size = 1024 while True: chunk = file_handle.read(chunk_size) if chr(0) in chunk: return True if len(chunk) < chunk_size: break finally: file_handle.close() return False
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/io.py#L403-L424
parse binary file to custom class
python
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Implementation of the parent method : since this is a multifile parser, this is not implemented. :param desired_type: :param file_path: :param encoding: :param logger: :param options: :return: """ raise Exception('Not implemented since this is a MultiFileParser')
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_core.py#L430-L442
parse binary file to custom class
python
def is_binary_file(file_obj): """ Returns True if file has non-ASCII characters (> 0x7F, or 127) Should work in both Python 2 and 3 """ start = file_obj.tell() fbytes = file_obj.read(1024) file_obj.seek(start) is_str = isinstance(fbytes, str) for fbyte in fbytes: if is_str: code = ord(fbyte) else: code = fbyte if code > 127: return True return False
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L712-L728
parse binary file to custom class
python
def parse_file(file_or_path, encoding='utf-8', validate=False): """ Tries to parse a given filepath or fileobj into a Bibliography instance. If ``validate`` is passed as keyword argument and set to ``True``, the Bibliography will be validated using the standard rules. """ try: is_string = isinstance(file_or_path, basestring) except NameError: is_string = isinstance(file_or_path, str) if is_string: with codecs.open(file_or_path, 'r', encoding) as file_: result = pattern.parseFile(file_)[0] else: result = pattern.parseFile(file_or_path)[0] if validate: result.validate() return result
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L115-L132
parse binary file to custom class
python
def parse(file_path, content=None): """ Create a PythonFile object with specified file_path and content. If content is None then, it is loaded from the file_path method. Otherwise, file_path is only used for reporting errors. """ try: # Parso reads files in binary mode and converts to unicode # using python_bytes_to_unicode() function. As a result, # we no longer have information about original file encoding and # output of module.get_content() can't be converted back to bytes # For now we can make a compromise by reading the file ourselves # and passing content to parse() function. if content is None: with open(file_path) as f: content = f.read() py_tree = _parser.parse( content, path=file_path, error_recovery=False) return ParsoPythonFile(file_path, py_tree) except parso.parser.ParserSyntaxError as ex: logging.error("Failed to parse %s:%d '%s'", file_path, ex.error_leaf.line, ex.error_leaf.get_code())
https://github.com/getgauge/gauge-python/blob/90f3547dcfd2d16d51f116cdd4e53527eeab1a57/getgauge/parser_parso.py#L15-L36
parse binary file to custom class
python
def binary_file(self, file=None): """Same as :meth:`file` but for binary content.""" if file is None: file = BytesIO() self._binary_file(file) return file
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Dumper/file.py#L109-L114
parse binary file to custom class
python
def file_request(self): """ Request that retrieve a binary file """ response = requests.get( self.__base_url, headers=self.__headers, stream=True) return response.raw.read(), response.headers
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/resources/connection.py#L77-L86
parse binary file to custom class
python
def parse(self, type_str): """ Parses a type string into an appropriate instance of :class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed, throws :class:`~eth_abi.exceptions.ParseError`. :param type_str: The type string to be parsed. :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing information about the parsed type string. """ if not isinstance(type_str, str): raise TypeError('Can only parse string values: got {}'.format(type(type_str))) try: return super().parse(type_str) except parsimonious.ParseError as e: raise ParseError(e.text, e.pos, e.expr)
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/grammar.py#L109-L125
parse binary file to custom class
python
def readBinary (self, filename=None): """Reads a binary command sequence from the given filename (defaults to self.pathname). """ if filename is None: filename = self.pathname stream = open(filename, 'rb') magic = struct.unpack('>H', stream.read(2))[0] self.crc32 = struct.unpack('>I', stream.read(4))[0] self.seqid = struct.unpack('>H', stream.read(2))[0] self.version = struct.unpack('>H', stream.read(2))[0] ncmds = struct.unpack('>H', stream.read(2))[0] reserved = stream.read(20) for n in range(ncmds): bytes = stream.read(110) self.lines.append( SeqCmd.decode(bytes, self.cmddict) )
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/seq.py#L186-L203
parse binary file to custom class
python
def _parse_file(filename, relpath=None): """ Return a list of :class:`_PyconfigCall` from parsing `filename`. :param filename: A file to parse :param relpath: Relative directory to strip (optional) :type filename: str :type relpath: str """ with open(filename, 'r') as source: source = source.read() pyconfig_calls = [] try: nodes = ast.parse(source, filename=filename) except SyntaxError: # XXX(Jake): We might want to handle this differently return [] # Look for UTF-8 encoding first_lines = source[0:200] match = re.match('^#.*coding[:=].?([a-zA-Z0-9-_]+).*', first_lines) if match: try: coding = match.group(1) source = source.decode(coding) except: print("# Error decoding file, may not parse correctly:", filename) try: # Split the source into lines so we can reference it easily source = source.split('\n') except: print("# Error parsing file, ignoring:", filename); return [] # Make the filename relative to the given path, if needed if relpath: filename = os.path.relpath(filename, relpath) for call in ast.walk(nodes): if not isinstance(call, _ast.Call): # Skip any node that isn't a Call continue func = call.func if not isinstance(call.func, _ast.Attribute): # We're looking for calls to pyconfig.*, so the function has to be # an Attribute node, otherwise skip it continue if getattr(func.value, 'id', None) != 'pyconfig': # If the Attribute value isn't a Name (doesn't have an `id`) or it # isn't 'pyconfig', then we skip continue if func.attr not in ['get', 'set', 'setting']: # If the Attribute attr isn't one of the pyconfig API methods, then # we skip continue # Now we parse the call arguments as best we can args = [] if call.args: arg = call.args[0] if isinstance(arg, _ast.Str): args.append(arg.s) else: args.append(_map_arg(arg)) for arg in call.args[1:]: args.append(_map_arg(arg)) line = (filename, source[call.lineno-1], call.lineno, call.col_offset) call = _PyconfigCall(func.attr, args[0], args[1:], line) pyconfig_calls.append(call) return pyconfig_calls
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/scripts.py#L475-L553
parse binary file to custom class
python
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Relies on the inner parsing function to parse the file. If _streaming_mode is True, the file will be opened and closed by this method. Otherwise the parsing function will be responsible to open and close. :param desired_type: :param file_path: :param encoding: :param options: :return: """ opts = get_options_for_id(options, self.get_id_for_options()) if self._streaming_mode: # We open the stream, and let the function parse from it file_stream = None try: # Open the file with the appropriate encoding file_stream = open(file_path, 'r', encoding=encoding) # Apply the parsing function if self.function_args is None: return self._parser_func(desired_type, file_stream, logger, **opts) else: return self._parser_func(desired_type, file_stream, logger, **self.function_args, **opts) except TypeError as e: raise CaughtTypeError.create(self._parser_func, e) finally: if file_stream is not None: # Close the File in any case file_stream.close() else: # the parsing function will open the file itself if self.function_args is None: return self._parser_func(desired_type, file_path, encoding, logger, **opts) else: return self._parser_func(desired_type, file_path, encoding, logger, **self.function_args, **opts)
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_core.py#L573-L615
parse binary file to custom class
python
def is_binary(self, subpath=None): """ Gets whether the specified subpath is a supported binary file. """ mimetype = self.mimetype_for(subpath) return mimetype is not None and mimetype.startswith('image/')
https://github.com/joeyespo/grip/blob/ce933ccc4ca8e0d3718f271c59bd530a4518bf63/grip/readers.py#L220-L225
parse binary file to custom class
python
def _parse_build(encoded_data, pointer=0, spec=None, spec_params=None, strict=False): """ Parses a byte string generically, or using a spec with optional params :param encoded_data: A byte string that contains BER-encoded data :param pointer: The index in the byte string to parse from :param spec: A class derived from Asn1Value that defines what class_ and tag the value should have, and the semantics of the encoded value. The return value will be of this type. If omitted, the encoded value will be decoded using the standard universal tag based on the encoded tag number. :param spec_params: A dict of params to pass to the spec object :param strict: A boolean indicating if trailing data should be forbidden - if so, a ValueError will be raised when trailing data exists :return: A 2-element tuple: - 0: An object of the type spec, or if not specified, a child of Asn1Value - 1: An integer indicating how many bytes were consumed """ encoded_len = len(encoded_data) info, new_pointer = _parse(encoded_data, encoded_len, pointer) if strict and new_pointer != pointer + encoded_len: extra_bytes = pointer + encoded_len - new_pointer raise ValueError('Extra data - %d bytes of trailing data were provided' % extra_bytes) return (_build(*info, spec=spec, spec_params=spec_params), new_pointer)
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L5258-L5293
parse binary file to custom class
python
def parse( files, config=None, compilation_mode=COMPILATION_MODE.FILE_BY_FILE, cache=None): """ Parse header files. :param files: The header files that should be parsed :type files: list of str :param config: Configuration object or None :type config: :class:`parser.xml_generator_configuration_t` :param compilation_mode: Determines whether the files are parsed individually or as one single chunk :type compilation_mode: :class:`parser.COMPILATION_MODE` :param cache: Declaration cache (None=no cache) :type cache: :class:`parser.cache_base_t` or str :rtype: list of :class:`declarations.declaration_t` """ if not config: config = xml_generator_configuration_t() parser = project_reader_t(config=config, cache=cache) declarations = parser.read_files(files, compilation_mode) config.xml_generator_from_xml_file = parser.xml_generator_from_xml_file return declarations
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/__init__.py#L29-L53
parse binary file to custom class
python
def _parse_file(self, file): """ Parses the given file. """ case = Case() file.seek(0) case.base_mva = float(file.next().split(",")[1].split("/")[0]) case.name = "%s %s" % (file.next().strip(), file.next().strip()) bustype_map = {1: "PQ", 2: "PV", 3: "ref", 4: "isolated"} # I, 'NAME', BASKV, IDE, GL, BL, AREA, ZONE, VM, VA, OWNER bus_data = file.next().split(",") while bus_data[0].strip()[0] != "0": bus = Bus() i = int(bus_data[0].strip()) self.bus_map[i] = bus bus._i = i bus.name = bus_data[1].strip("'").strip() bus.v_base = float(bus_data[2]) bus.type = bustype_map[int(bus_data[3])] bus.g_shunt = float(bus_data[4]) bus.b_shunt = float(bus_data[5]) bus.v_magnitude = float(bus_data[8]) bus.v_angle = float(bus_data[9]) # bus.area = 1; # hcui7 added case.buses.append(bus) bus_data = file.next().split(",") # I, ID, STATUS, AREA, ZONE, PL, QL, IP, IQ, YP, YQ, OWNER load_data = file.next().split(",") while load_data[0].strip()[0] != "0": bus = self.bus_map[int(load_data[0].strip())] bus.p_demand += float(load_data[5]) bus.q_demand += float(load_data[6]) load_data = file.next().split(",") #I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1 gen_data = file.next().split(",") while gen_data[0].strip()[0] != "0": bus = self.bus_map[int(gen_data[0].strip())] g = Generator(bus) g.p = float(gen_data[2]) g.q = float(gen_data[3]) g.q_max = float(gen_data[4]) g.q_min = float(gen_data[5]) g.v_magnitude = float(gen_data[6]) g.base_mva = float(gen_data[8]) g.online = bool(int(gen_data[14])) g.p_max = float(gen_data[16]) g.p_min = float(gen_data[17]) case.generators.append(g) gen_data = file.next().split(",") # I,J,CKT,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4 branch_data = file.next().split(",") while branch_data[0].strip()[0] != "0": from_bus = self.bus_map[abs(int(branch_data[0]))] to_bus = self.bus_map[abs(int(branch_data[1]))] l = Branch(from_bus, to_bus) l.r = float(branch_data[3]) l.x = float(branch_data[4]) l.b = float(branch_data[5]) 
l.rate_a = float(branch_data[6]) l.rate_b = float(branch_data[7]) l.rate_c = float(branch_data[8]) # l.online = bool(int(branch_data[13])) case.branches.append(l) branch_data = file.next().split(",") # I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4 # R1-2,X1-2,SBASE1-2 # WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1 # WINDV2,NOMV2 trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": trx_data2 = file.next().split(",") trx_data3 = file.next().split(",") trx_data4 = file.next().split(",") # second winding if len(trx_data2) < 5: from_bus = self.bus_map[abs(int(trx_data[0]))] to_bus = self.bus_map[abs(int(trx_data[1]))] l = Branch(from_bus, to_bus) l.name = trx_data[10].strip("'").strip() l.online = bool(int(trx_data[11])) l.b = float(trx_data[8]) l.r = float(trx_data2[0]) l.x = float(trx_data2[1]) l.ratio = float(trx_data3[0]) l.phase_shift = float(trx_data3[2]) rate_a = float(trx_data3[3]) if rate_a != 0.0: l.rate_a = rate_a rate_b = float(trx_data3[4]) if rate_b != 0.0: l.rate_b = rate_b rate_c = float(trx_data3[5]) if rate_c != 0.0: l.rate_c = rate_c case.branches.append(l) trx_data = file.next().split(",") else: # I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4 # R1-2,X1-2,SBASE1-2,R2-3,X2-3,SBASE2-3,R3-1,X3-1,SBASE3-1,VMSTAR,ANSTAR # WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1 # WINDV2,NOMV2,ANG2,RATA2,RATB2,RATC2,COD2,CONT2,RMA2,RMI2,VMA2,VMI2,NTP2,TAB2,CR2,CX2 # WINDV3,NOMV3,ANG3,RATA3,RATB3,RATC3,COD3,CONT3,RMA3,RMI3,VMA3,VMI3,NTP3,TAB3,CR3,CX3 trx_data5 = file.next().split(",") # third winding # Three-winding transformers are modelled as a group of three # two-winding transformers with a fictitious neutral bus. 
tmp_bus = Bus() tmp_bus.name = "n" + tmp_bus.name tmp_bus._i = len(case.buses) + 1 bus1 = self.bus_map[abs(int(trx_data[0]))] bus2 = self.bus_map[abs(int(trx_data[1]))] bus3 = self.bus_map[abs(int(trx_data[2]))] l1 = Branch(tmp_bus, bus1) l2 = Branch(tmp_bus, bus2) l3 = Branch(tmp_bus, bus3) b = float(trx_data[8]) # MAG2 l1.b = b# / 3.0 # l2.b = b / 3.0 # l3.b = b / 3.0 on = bool(int(trx_data[11])) l1.online = on l2.online = on l3.online = on r12 = float(trx_data2[0]) x12 = float(trx_data2[1]) r23 = float(trx_data2[3]) x23 = float(trx_data2[4]) r31 = float(trx_data2[6]) x31 = float(trx_data2[7]) l1.r = 0.5 * (r12 + r31 - r23) l1.x = 0.5 * (x12 + x31 - x23) l2.r = 0.5 * (r12 + r23 - r31) l2.x = 0.5 * (x12 + x23 - x31) l3.r = 0.5 * (r23 + r31 - r12) l3.x = 0.5 * (x23 + x31 - x12) for l in [l1, l2, l3]: if abs(l.x) < 1e-5: logger.warning("Zero branch reactance [%s]." % l.name) l.x = self.xtol if abs(complex(l.r, l.x)) < 0.00001: logger.warning("Zero branch impedance [%s]." % l.name) l1.ratio = float(trx_data3[0]) l1.phase_shift = float(trx_data3[2]) l2.ratio = float(trx_data4[0]) l2.phase_shift = float(trx_data4[2]) l3.ratio = float(trx_data5[0]) l3.phase_shift = float(trx_data5[2]) rate_a1 = float(trx_data3[3]) rate_b1 = float(trx_data3[4]) rate_c1 = float(trx_data3[5]) if rate_a1 > 0.0: l1.rate_a = rate_a1 if rate_b1 > 0.0: l1.rate_b = rate_b1 if rate_c1 > 0.0: l1.rate_c = rate_c1 rate_a2 = float(trx_data4[3]) rate_b2 = float(trx_data4[4]) rate_c2 = float(trx_data4[5]) if rate_a2 > 0.0: l2.rate_a = rate_a2 if rate_b2 > 0.0: l2.rate_b = rate_b2 if rate_c2 > 0.0: l2.rate_c = rate_c2 rate_a3 = float(trx_data5[3]) rate_b3 = float(trx_data5[4]) rate_c3 = float(trx_data5[5]) if rate_a3 > 0.0: l3.rate_a = rate_a3 if rate_b2 > 0.0: l3.rate_b = rate_b3 if rate_c2 > 0.0: l3.rate_c = rate_c3 case.buses.append(tmp_bus) case.branches.append(l1) case.branches.append(l2) case.branches.append(l3) trx_data = file.next().split(",") # Area interchange data. 
# I, ISW, PDES, PTOL, 'ARNAME' trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring area interchange data.") trx_data = file.next().split(",") # Two-terminal DC line data. # I,MDC,RDC,SETVL,VSCHD,VCMOD,RCOMP,DELTI,METER,DCVMIN,CCCITMX,CCCACC # IPR,NBR,ALFMX,ALFMN,RCR,XCR,EBASR,TRR,TAPR,TMXR,TMNR,STPR,ICR,IFR,ITR,IDR,XCAPR # IPI,NBI,GAMMX,GAMMN,RCI,XCI,EBASI,TRI,TAPI,TMXI,TMNI,STPI,ICI,IFI,ITI,IDI,XCAPI trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring two-terminal DC line data.") trx_data = file.next().split(",") # VSC DC line data. # 'NAME', MDC, RDC, O1, F1, ... O4, F4 # IBUS,TYPE,MODE,DOCET,ACSET,ALOSS,BLOSS,MINOSS,SMAX,IMAX,PWF,MAXQ,MINQ, # REMOT,RMPCT trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring VSC DC line data.") trx_data = file.next().split(",") # Switched shunt data. # I,MODSW,VSWHI,VSWLO,SWREM,RMPCT,'RMIDNT',BINIT,N1,B1,N2,B2,...N8,B8 trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": bus = self.bus_map[abs(int(trx_data[0]))] bus.b_shunt += float(trx_data[7]) trx_data = file.next().split(",") # Transformer impedance correction table. # I, T1, F1, T2, F2, T3, F3, ... T11, F11 trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring transformer X correction table data.") trx_data = file.next().split(",") # Multi-terminal dc line data. # I, NCONV, NDCBS, NDCLN, MDC, VCONV, VCMOD, VCONVN # IB,N,ANGMX,ANGMN,RC,XC,EBAS,TR,TAP,TPMX,TPMN,TSTP,SETVL,DCPF,MARG,CNVCOD # IDC, IB, IA, ZONE, 'NAME', IDC2, RGRND, OWNER # IDC, JDC, DCCKT, RDC, LDC trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring multi-terminal dc line data.") trx_data = file.next().split(",") # Multisection line data. 
# I,J,ID,DUM1,DUM2,...DUM9 trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring multisection line data.") trx_data = file.next().split(",") # Zone data. # I,'ZONAME' trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring zone data.") trx_data = file.next().split(",") # Interarea transfer data. # ARFROM, ARTO, TRID, PTRAN trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring interarea transfer data.") trx_data = file.next().split(",") # Owner data. # I,'OWNAME' trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring owner data.") trx_data = file.next().split(",") # FACTS device data. # N,I,J,MODE,PDES,QDES,VSET,SHMX,TRMX,VTMN,VTMX,VSMX,IMX,LINX,RMPCT,OWNER,SET1,SET2,VSREF trx_data = file.next().split(",") while trx_data[0].strip()[0] != "0": logger.warning("Ignoring FACTS device data.") trx_data = file.next().split(",") return case
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psse.py#L89-L370
parse binary file to custom class
python
def parse_binary(self, data, display, rawdict = 0): """values, remdata = s.parse_binary(data, display, rawdict = 0) Convert a binary representation of the structure into Python values. DATA is a string or a buffer containing the binary data. DISPLAY should be a Xlib.protocol.display.Display object if there are any Resource fields or Lists with ResourceObjs. The Python values are returned as VALUES. If RAWDICT is true, a Python dictionary is returned, where the keys are field names and the values are the corresponding Python value. If RAWDICT is false, a DictWrapper will be returned where all fields are available as attributes. REMDATA are the remaining binary data, unused by the Struct object. """ ret = {} val = struct.unpack(self.static_codes, data[:self.static_size]) lengths = {} formats = {} vno = 0 for f in self.static_fields: # Fields without name should be ignored. This is typically # pad and constant fields if not f.name: pass # Store index in val for Length and Format fields, to be used # when treating varfields. elif isinstance(f, LengthField): f_names = [f.name] if f.other_fields: f_names.extend(f.other_fields) field_val = val[vno] if f.parse_value is not None: field_val = f.parse_value(field_val, display) for f_name in f_names: lengths[f_name] = field_val elif isinstance(f, FormatField): formats[f.name] = val[vno] # Treat value fields the same was as in parse_value. else: if f.structvalues == 1: field_val = val[vno] else: field_val = val[vno:vno+f.structvalues] if f.parse_value is not None: field_val = f.parse_value(field_val, display) ret[f.name] = field_val vno = vno + f.structvalues data = data[self.static_size:] # Call parse_binary_value for each var_field, passing the # length and format values from the unpacked val. for f in self.var_fields: ret[f.name], data = f.parse_binary_value(data, display, lengths.get(f.name), formats.get(f.name), ) if not rawdict: ret = DictWrapper(ret) return ret, data
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/protocol/rq.py#L1135-L1210
parse binary file to custom class
python
def bin_b64_type(arg): """An argparse type representing binary data encoded in base64.""" try: arg = base64.standard_b64decode(arg) except (binascii.Error, TypeError): raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg))) return arg
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L78-L84
parse binary file to custom class
python
def parse_binary(self): ''' when retrieving a NonRDF resource, parse binary data and make available via generators ''' # derive mimetype self.mimetype = self.resource.rdf.graph.value( self.resource.uri, self.resource.rdf.prefixes.ebucore.hasMimeType).toPython() # get binary content as stremable response self.data = self.resource.repo.api.http_request( 'GET', self.resource.uri, data=None, headers={'Content-Type':self.resource.mimetype}, is_rdf=False, stream=True)
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1718-L1737
parse binary file to custom class
python
def parse(cls, s, **kwargs): """ Parse a bytes object and create a class object. :param bytes s: A bytes object. :return: A class object. :rtype: cls """ pb2_obj = cls._get_cmsg() pb2_obj.ParseFromString(s) return cls.parse_from_cmessage(pb2_obj, **kwargs)
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/serializable.py#L52-L64
parse binary file to custom class
python
def parse(self): """wrapper for original parse function. will read pkl with bdf info, if available.""" if os.path.exists(self.pklname): # check for pkl with binary info logger.info('Found bdf pkl file %s. Loading...' % (self.pklname)) try: with open(self.pklname,'rb') as pkl: (self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols) = pickle.load(pkl) except: logger.warning('Something went wrong. Parsing bdf directly...') self._parse() else: if self.pklname: logger.info('Could not find bdf pkl file %s.' % (self.pklname)) self._parse() self.headsize, self.intsize = self.calc_intsize() return self
https://github.com/caseyjlaw/sdmreader/blob/b6c3498f1915138727819715ee00d2c46353382d/sdmreader/sdmreader.py#L331-L349
parse binary file to custom class
python
def binary2base64(binary_file: str) -> str: """Convert a binary file (OGG, executable, etc.) to a printable string. """ # Use mode = "rb" to read binary file with open(binary_file, "rb") as bin_file: encoded_string = base64.b64encode(bin_file.read()) return encoded_string.decode()
https://github.com/cedricbonhomme/Stegano/blob/502e6303791d348e479290c22108551ba3be254f/stegano/tools.py#L96-L103
parse binary file to custom class
python
def Binary(x): """Return x as a binary type.""" if isinstance(x, text_type) and not (JYTHON or IRONPYTHON): return x.encode() return bytes(x)
https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/__init__.py#L77-L81
parse binary file to custom class
python
def parse_file(self, filename): """ Load self from the file, such as "MANIFEST.MF" or "SIGNATURE.SF". :param filename: contains UTF-8 encoded manifest """ with open(filename, "rb", _BUFFERING) as stream: self.parse(stream.read())
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/manifest.py#L346-L353
parse binary file to custom class
python
def is_binary(filename): """ Returns True if the file is binary """ with open(filename, 'rb') as fp: data = fp.read(1024) if not data: return False if b'\0' in data: return True return False
https://github.com/dmerejkowsky/replacer/blob/8dc16f297d0ff3a6ee2fa3c0d77789a6859b0f6a/replacer.py#L55-L65
parse binary file to custom class
python
def parse_file(self, cu, analysis): """Generate data for single file""" if hasattr(analysis, 'parser'): filename = cu.file_locator.relative_filename(cu.filename) source_lines = analysis.parser.lines with cu.source_file() as source_file: source = source_file.read() try: if sys.version_info < (3, 0): encoding = source_encoding(source) if encoding != 'utf-8': source = source.decode(encoding).encode('utf-8') except UnicodeDecodeError: log.warning( 'Source file %s can not be properly decoded, skipping. ' 'Please check if encoding declaration is ok', os.path.basename(cu.filename)) return else: if hasattr(cu, 'relative_filename'): filename = cu.relative_filename() else: filename = analysis.coverage.file_locator.relative_filename( cu.filename) token_lines = analysis.file_reporter.source_token_lines() source_lines = list(enumerate(token_lines)) source = analysis.file_reporter.source() coverage_lines = [self.get_hits(i, analysis) for i in range(1, len(source_lines) + 1)] # ensure results are properly merged between platforms posix_filename = filename.replace(os.path.sep, '/') results = { 'name': posix_filename, 'source': source, 'coverage': coverage_lines, } branches = self.get_arcs(analysis) if branches: results['branches'] = branches self.source_files.append(results)
https://github.com/coveralls-clients/coveralls-python/blob/0d2636d029b329f8bd74cad43e04b2c8f518532a/coveralls/reporter.py#L116-L162
parse binary file to custom class
python
def is_binary(self): """Return true if this is a binary file.""" with open(self.path, 'rb') as fin: CHUNKSIZE = 1024 while 1: chunk = fin.read(CHUNKSIZE) if b'\0' in chunk: return True if len(chunk) < CHUNKSIZE: break return False
https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L224-L234
parse binary file to custom class
python
def parse(self, **global_args): """Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment. """ if self.build_file not in ParseContext._parsed: # http://en.wikipedia.org/wiki/Abstract_syntax_tree # http://martinfowler.com/books/dsl.html butcher_context = {} for str_to_exec in self._strs_to_exec: ast = compile(str_to_exec, '<string>', 'exec') exec_function(ast, butcher_context) with ParseContext.activate(self): startdir = os.path.abspath(os.curdir) try: os.chdir(self.build_file.path_on_disk) if self.build_file not in ParseContext._parsed: ParseContext._parsed.add(self.build_file) eval_globals = copy.copy(butcher_context) eval_globals.update( {'ROOT_DIR': self.build_file.path_on_disk, '__file__': 'bogus please fix this'}) eval_globals.update(global_args) exec_function(self.build_file.code, eval_globals) finally: os.chdir(startdir)
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile_context.py#L60-L88
parse binary file to custom class
python
def parse(self, file, outfile=None): """Parse a BGI (basic gene info) JSON file """ file = self._ensure_file(file) obj = json.load(file) items = obj['data'] return [self.transform_item(item) for item in items]
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L197-L203
parse binary file to custom class
python
def files(self): """Set of found binary files output by StagYY.""" if self._rundir['ls'] is UNDETERMINED: out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_') out_dir = self.path / out_stem.parent if out_dir.is_dir(): self._rundir['ls'] = set(out_dir.iterdir()) else: self._rundir['ls'] = set() return self._rundir['ls']
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyydata.py#L505-L514
parse binary file to custom class
python
def _parse_file(self): """Preprocess and parse C file into an AST""" # We need to set the CPU type to pull in the right register definitions # only preprocess the file (-E) and get rid of gcc extensions that aren't # supported in ISO C. args = utilities.build_includes(self.arch.includes()) # args.append('-mcpu=%s' % self.arch.property('chip')) args.append('-E') args.append('-D__attribute__(x)=') args.append('-D__extension__=') self.ast = parse_file(self.filepath, use_cpp=True, cpp_path='arm-none-eabi-gcc', cpp_args=args)
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/cfileparser.py#L35-L47
parse binary file to custom class
python
def file_object_supports_binary(fp): # type: (BinaryIO) -> bool ''' A function to check whether a file-like object supports binary mode. Parameters: fp - The file-like object to check for binary mode support. Returns: True if the file-like object supports binary mode, False otherwise. ''' if hasattr(fp, 'mode'): return 'b' in fp.mode # Python 3 if sys.version_info >= (3, 0): return isinstance(fp, (io.RawIOBase, io.BufferedIOBase)) # Python 2 return isinstance(fp, (cStringIO.OutputType, cStringIO.InputType, io.RawIOBase, io.BufferedIOBase))
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L304-L322
parse binary file to custom class
python
def parse_file(cls, filename, encoding='utf-8', required=True, resolve=True, unresolved_value=DEFAULT_SUBSTITUTION): """Parse file :param filename: filename :type filename: basestring :param encoding: file encoding :type encoding: basestring :param required: If true, raises an exception if can't load file :type required: boolean :param resolve: if true, resolve substitutions :type resolve: boolean :param unresolved_value: assigned value value to unresolved substitution. If overriden with a default value, it will replace all unresolved value to the default value. If it is set to to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x}) :type unresolved_value: boolean :return: Config object :type return: Config """ try: with codecs.open(filename, 'r', encoding=encoding) as fd: content = fd.read() return cls.parse_string(content, os.path.dirname(filename), resolve, unresolved_value) except IOError as e: if required: raise e logger.warn('Cannot include file %s. File does not exist or cannot be read.', filename) return []
https://github.com/chimpler/pyhocon/blob/e5b22a8e74e8f88e43cf9e9140cca5f2cd0ab4a3/pyhocon/config_parser.py#L78-L104
parse binary file to custom class
python
def _parse_file(self, file): """ Parses the given file-like object. """ case = Case() file.seek(0) line = file.readline().split() if line[0] != "function": logger.error("Invalid data file header.") return case if line[1] != "mpc": self._is_struct = False base = "" else: base = "mpc." case.name = line[-1] for line in file: if line.startswith("%sbaseMVA" % base): case_data = line.rstrip(";\n").split() case.base_mva = float(case_data[-1]) elif line.startswith("%sbus" % base): self._parse_buses(case, file) elif line.startswith("%sgencost" % base): self._parse_gencost(case, file) elif line.startswith("%sgen" % base): self._parse_generators(case, file) elif line.startswith("%sbranch" % base): self._parse_branches(case, file) return case
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/matpower.py#L95-L125
parse binary file to custom class
python
def parse_encoding(fp): """Deduce the encoding of a Python source file (binary mode) from magic comment. It does this in the same way as the `Python interpreter`__ .. __: http://docs.python.org/ref/encodings.html The ``fp`` argument should be a seekable file object in binary mode. """ pos = fp.tell() fp.seek(0) try: line1 = fp.readline() has_bom = line1.startswith(codecs.BOM_UTF8) if has_bom: line1 = line1[len(codecs.BOM_UTF8):] m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore')) if not m: try: import parser parser.suite(line1.decode('ascii', 'ignore')) except (ImportError, SyntaxError): # Either it's a real syntax error, in which case the source # is not valid python source, or line2 is a continuation of # line1, in which case we don't want to scan line2 for a magic # comment. pass else: line2 = fp.readline() m = _PYTHON_MAGIC_COMMENT_re.match( line2.decode('ascii', 'ignore')) if has_bom: if m: raise SyntaxError("python refuses to compile code with both a UTF8" \ " byte-order-mark and a magic encoding comment") return 'utf_8' elif m: return m.group(1) else: return None finally: fp.seek(pos)
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/util.py#L209-L253
parse binary file to custom class
python
def parse_file(filename): """Parse the provided file, and return Code object.""" assert isinstance(filename, _str_type), "`filename` parameter should be a string, got %r" % type(filename) with open(filename, "rt", encoding="utf-8") as f: return Code(_tokenize(f.readline))
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L50-L54
parse binary file to custom class
python
def find_binary(self, binary): """ Scan and return the first path to a binary that we can find """ if os.path.exists(binary): return binary # Extract out the filename if we were given a full path binary_name = os.path.basename(binary) # Gather $PATH search_paths = os.environ['PATH'].split(':') # Extra paths to scan... default_paths = [ '/usr/bin', '/bin' '/usr/local/bin', '/usr/sbin', '/sbin' '/usr/local/sbin', ] for path in default_paths: if path not in search_paths: search_paths.append(path) for path in search_paths: if os.path.isdir(path): filename = os.path.join(path, binary_name) if os.path.exists(filename): return filename return binary
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/collector.py#L495-L528
parse binary file to custom class
python
def _is_common_binary(self, inpath): """private method to compare file path mime type to common binary file types""" # make local variables for the available char numbers in the suffix types to be tested two_suffix = inpath[-3:] three_suffix = inpath[-4:] four_suffix = inpath[-5:] # test for inclusion in the instance variable common_binaries (defined in __init__) if two_suffix in self.common_binaries: return True elif three_suffix in self.common_binaries: return True elif four_suffix in self.common_binaries: return True else: return False
https://github.com/chrissimpkins/crypto/blob/6b95fa81b26312e46f02557dca0b5f5c898a76fd/lib/crypto/library/cryptor.py#L132-L147
parse binary file to custom class
python
def binary_regex(self): """Return the regex for the binary.""" regex = {'linux': r'^%(BINARY_NAME)s-%(VERSION)s\.%(EXT)s$', 'linux64': r'^%(BINARY_NAME)s-%(VERSION)s\.%(EXT)s$', 'mac': r'^%(BINARY_NAME)s(?:\s|-)%(VERSION)s\.%(EXT)s$', 'mac64': r'^%(BINARY_NAME)s(?:\s|-)%(VERSION)s\.%(EXT)s$', 'win32': r'^%(BINARY_NAME)s(%(STUB_NEW)s|(?:\sSetup\s|-)%(STUB)s%(VERSION)s)\.%(EXT)s$', 'win64': r'^%(BINARY_NAME)s(%(STUB_NEW)s|(?:\sSetup\s|-)%(STUB)s%(VERSION)s)\.%(EXT)s$', } return regex[self.platform] % { 'BINARY_NAME': APPLICATIONS_TO_BINARY_NAME.get(self.application, self.application), 'EXT': self.extension, 'STUB': 'Stub ' if self.is_stub_installer else '', 'STUB_NEW': ' Installer' if self.is_stub_installer else '', 'VERSION': self.version, }
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L607-L624
parse binary file to custom class
python
def to_binary(s, encoding='utf8'): """Portable cast function. In python 2 the ``str`` function which is used to coerce objects to bytes does not accept an encoding argument, whereas python 3's ``bytes`` function requires one. :param s: object to be converted to binary_type :return: binary_type instance, representing s. """ if PY3: # pragma: no cover return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding) return binary_type(s)
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L61-L72
parse binary file to custom class
python
def _parse(fileobj): """Parse fileobj for a shebang.""" fileobj.seek(0) try: part = fileobj.read(2) except UnicodeDecodeError: part = "" if part == "#!": shebang = shlex.split(fileobj.readline().strip()) if (platform.system() == "Windows" and len(shebang) and os.path.basename(shebang[0]) == "env"): return shebang[1:] return shebang return []
https://github.com/polysquare/python-parse-shebang/blob/18fddc6d987268edb031a2903c66820f5ad52902/parseshebang/__init__.py#L20-L37
parse binary file to custom class
python
def bin2bas(self, data): """ convert binary files to a ASCII basic string. Supported are: * Dragon DOS Binary Format * TODO: CoCo DECB (Disk Extended Color BASIC) Format see: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139 """ data = bytearray(data) binary_file = BinaryFile() binary_file.load_from_bin(data) if binary_file.file_type != 0x01: log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!", binary_file.file_type) ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data, # FIXME: #program_start=bin.exec_address program_start=binary_file.load_address ) return "\n".join(ascii_lines)
https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/api.py#L156-L179
parse binary file to custom class
python
def _do_parse(inp, fmt, encoding, force_types): """Actually parse input. Args: inp: bytes yielding file-like object fmt: format to use for parsing encoding: encoding of `inp` force_types: if `True`, integers, floats, booleans and none/null are recognized and returned as proper types instead of strings; if `False`, everything is converted to strings if `None`, backend return value is used Returns: parsed `inp` (dict or list) containing unicode values Raises: various sorts of errors raised by used libraries while parsing """ res = {} _check_lib_installed(fmt, 'parse') if fmt == 'ini': cfg = configobj.ConfigObj(inp, encoding=encoding) res = cfg.dict() elif fmt == 'json': if six.PY3: # python 3 json only reads from unicode objects inp = io.TextIOWrapper(inp, encoding=encoding) res = json.load(inp, encoding=encoding) elif fmt == 'json5': if six.PY3: inp = io.TextIOWrapper(inp, encoding=encoding) res = json5.load(inp, encoding=encoding) elif fmt == 'toml': if not _is_utf8(encoding): raise AnyMarkupError('toml is always utf-8 encoded according to specification') if six.PY3: # python 3 toml prefers unicode objects inp = io.TextIOWrapper(inp, encoding=encoding) res = toml.load(inp) elif fmt == 'xml': res = xmltodict.parse(inp, encoding=encoding) elif fmt == 'yaml': # guesses encoding by its own, there seems to be no way to pass # it explicitly res = yaml.safe_load(inp) else: raise # unknown format # make sure it's all unicode and all int/float values were parsed correctly # the unicode part is here because of yaml on PY2 and also as workaround for # https://github.com/DiffSK/configobj/issues/18#issuecomment-76391689 return _ensure_proper_types(res, encoding, force_types)
https://github.com/bkabrda/anymarkup-core/blob/299935092fc2650cca4e32ec92441786918f9bab/anymarkup_core/__init__.py#L208-L259
parse binary file to custom class
python
def get_binary_stdio(stream): """ Return the specified standard input, output or errors stream as a 'raw' buffer object suitable for reading/writing binary data from/to it. """ assert stream in ['stdin', 'stdout', 'stderr'], 'invalid stream name' stdio = getattr(sys, stream) if sys.version_info[0] < 3: if sys.platform == 'win32': # set I/O stream binary flag on python2.x (Windows) runtime = platform.python_implementation() if runtime == 'PyPy': # the msvcrt trick doesn't work in pypy, so I use fdopen mode = 'rb' if stream == 'stdin' else 'wb' stdio = os.fdopen(stdio.fileno(), mode, 0) else: # this works with CPython -- untested on other implementations import msvcrt msvcrt.setmode(stdio.fileno(), os.O_BINARY) return stdio else: # get 'buffer' attribute to read/write binary data on python3.x if hasattr(stdio, 'buffer'): return stdio.buffer else: orig_stdio = getattr(sys, '__%s__' % stream) return orig_stdio.buffer
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/python/bro.py#L21-L46
parse binary file to custom class
python
def binary(self): """Returns the binary that builds the pex for this lambda.""" dependencies = self.dependencies if len(dependencies) != 1: raise TargetDefinitionException(self, 'An app must define exactly one binary ' 'dependency, have: {}'.format(dependencies)) binary = dependencies[0] if not isinstance(binary, PythonBinary): raise TargetDefinitionException(self, 'Expected binary dependency to be a python_binary ' 'target, found {}'.format(binary)) return binary
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/awslambda/python/src/python/pants/contrib/awslambda/python/targets/python_awslambda.py#L49-L59
parse binary file to custom class
python
def parse(name, **kwargs): """ Parse a C/C++ file """ idx = clang.cindex.Index.create() assert os.path.exists(name) tu = idx.parse(name, **kwargs) return _ensure_parse_valid(tu)
https://github.com/AndrewWalker/glud/blob/57de000627fed13d0c383f131163795b09549257/glud/parsing.py#L35-L41
parse binary file to custom class
python
def parse(self, filelike, filename): """Parse the given file-like object and return its Module object.""" self.log = log self.source = filelike.readlines() src = "".join(self.source) # This may raise a SyntaxError: compile(src, filename, "exec") self.stream = TokenStream(StringIO(src)) self.filename = filename self.all = None self.future_imports = set() self._accumulated_decorators = [] return self.parse_module()
https://github.com/peterjc/flake8-rst-docstrings/blob/b8b17d0317fc6728d5586553ab29a7d97e6417fd/flake8_rst_docstrings.py#L587-L599
parse binary file to custom class
python
def conv(arg, source, target, filename, section): """Convert binary. Extract bytes in the given section from binary files and construct C source code that can be used to test as shellcode. Supported executable formats: ELF via pyelftools and PE via pefile. """ logging.info(_('This is Binary Conversion mode.')) section = section.encode('utf-8') if source == 'sec': arg = open(arg, 'rb') if source == 'sec': kwargs = dict(section_name=section) else: kwargs = dict() result = Converter.uni_from(source, arg, **kwargs).uni_to(target) if result: if filename: logging.info( _('Writing shellcode to the file: %s'), filename) mode = 'wb' if target == 'bin' else 'w' with open(filename, mode) as output: output.write(result) else: print(result) else: logging.error(_('Failed.')) if source == 'sec': arg.close() return 0
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/main.py#L392-L424
parse binary file to custom class
python
def extract(binary): ''' Extract a code object from a binary pyc file. :param binary: a sequence of bytes from a pyc file. ''' if len(binary) <= 8: raise Exception("Binary pyc must be greater than 8 bytes (got %i)" % len(binary)) magic = binary[:4] MAGIC = get_magic() if magic != MAGIC: raise Exception("Python version mismatch (%r != %r) Is this a pyc file?" % (magic, MAGIC)) modtime = time.asctime(time.localtime(struct.unpack('i', binary[4:8])[0])) code = marshal.loads(binary[8:]) return modtime, code
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/bytecodetools/pyc_file.py#L20-L39
parse binary file to custom class
python
def parse_file(defn, preprocess=True): """ Parse a series of C definitions, returns a tuple of two type mappings, one for variable definitions and one for type definitions. """ if pycparser is None: raise ImportError("Please install pycparser in order to parse C definitions") defn = '\n'.join(x for x in defn.split('\n') if _include_re.match(x) is None) if preprocess: defn = do_preprocess(defn) preamble, ignoreme = make_preamble() node = pycparser.c_parser.CParser().parse(preamble + defn) if not isinstance(node, pycparser.c_ast.FileAST): raise ValueError("Something went horribly wrong using pycparser") out = {} extra_types = {} for piece in node.ext: if isinstance(piece, pycparser.c_ast.FuncDef): out[piece.decl.name] = _decl_to_type(piece.decl.type, extra_types) elif isinstance(piece, pycparser.c_ast.Decl): ty = _decl_to_type(piece.type, extra_types) if piece.name is not None: out[piece.name] = ty elif isinstance(piece, pycparser.c_ast.Typedef): extra_types[piece.name] = _decl_to_type(piece.type, extra_types) for ty in ignoreme: del extra_types[ty] return out, extra_types
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_type.py#L1033-L1064
parse binary file to custom class
python
def parse_files(self, fls): """Public method for parsing abricate output files. This method is called at at class instantiation for the provided output files. Additional abricate output files can be added using this method after the class instantiation. Parameters ---------- fls : list List of paths to Abricate files """ for f in fls: # Make sure paths exists if os.path.exists(f): self._parser(f) else: logger.warning("File {} does not exist".format(f))
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L131-L150
parse binary file to custom class
python
def parse_file(self,filename): """Parse file (helper function)""" try: return self.rProgram.ignore(cStyleComment).parseFile(filename, parseAll=True) except SemanticException as err: print(err) exit(3) except ParseException as err: print(err) exit(3)
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L1297-L1306
parse binary file to custom class
python
def parse(self, filelike, filename): """Parse the given file-like object and return its Module object.""" self.log = log self.source = filelike.readlines() src = ''.join(self.source) try: compile(src, filename, 'exec') except SyntaxError as error: raise ParseError() from error self.stream = TokenStream(StringIO(src)) self.filename = filename self.dunder_all = None self.dunder_all_error = None self.future_imports = set() self._accumulated_decorators = [] return self.parse_module()
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/parser.py#L301-L316
parse binary file to custom class
python
def parse(self, raw): """Convert raw incoming to class attributes.""" self._raw = raw self.hub_name = self._parse("userData", "hubName", converter=base64_to_unicode) self.ip = self._parse("userData", "ip") self.ssid = self._parse("userData", "ssid")
https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/hub.py#L20-L25
parse binary file to custom class
python
def _binary_file(self, file): """Dump the ocntent into the `file` in binary mode. """ if self.__text_is_expected: file = TextWrapper(file, self.__encoding) self.__dump_to_file(file)
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Dumper/file.py#L116-L121
parse binary file to custom class
python
def _get_binary(data, position, obj_end, opts, dummy1): """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5]) position += 5 if subtype == 2: length2 = _UNPACK_INT(data[position:position + 4])[0] position += 4 if length2 != length - 4: raise InvalidBSON("invalid binary (st 2) - lengths don't match!") length = length2 end = position + length if length < 0 or end > obj_end: raise InvalidBSON('bad binary object length') if subtype in (3, 4): # Java Legacy uuid_representation = opts.uuid_representation if uuid_representation == JAVA_LEGACY: java = data[position:end] value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) # C# legacy elif uuid_representation == CSHARP_LEGACY: value = uuid.UUID(bytes_le=data[position:end]) # Python else: value = uuid.UUID(bytes=data[position:end]) return value, end # Python3 special case. Decode subtype 0 to 'bytes'. if PY3 and subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) return value, end
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L190-L221
parse binary file to custom class
python
def parse_file(self, fpath): ''' Read a file on the file system (relative to salt's base project dir) :returns: A file-like object. :raises IOError: If the file cannot be found or read. ''' sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir)) with open(os.path.join(sdir, fpath), 'rb') as f: return f.readlines()
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/doc/_ext/saltdomain.py#L40-L50
parse binary file to custom class
python
def binary(self): """Return the name of the build.""" def _get_binary(): # Retrieve all entries from the remote virtual folder parser = self._create_directory_parser(self.path) if not parser.entries: raise errors.NotFoundError('No entries found', self.path) # Download the first matched directory entry pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: self._binary = pattern.match(entry).group() break except Exception: # No match, continue with next entry continue else: raise errors.NotFoundError("Binary not found in folder", self.path) self._retry_check_404(_get_binary) return self._binary
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L165-L188
parse binary file to custom class
python
def parse(self, filename: str, language: Optional[str]=None, contents: Optional[str]=None, mode: Optional[ModeType]=None, timeout: Optional[int]=None) -> ResultContext: """ Queries the Babelfish server and receives the UAST response for the specified file. :param filename: The path to the file. Can be arbitrary if contents \ is not None. :param language: The programming language of the file. Refer to \ https://doc.bblf.sh/languages.html for the list of \ currently supported languages. None means autodetect. :param contents: The contents of the file. IF None, it is read from \ filename. :param mode: UAST transformation mode. :param timeout: The request timeout in seconds. :type filename: str :type language: str :type contents: str :type timeout: float :return: UAST object. """ # TODO: handle syntax errors contents = self._get_contents(contents, filename) request = ParseRequest(filename=os.path.basename(filename), content=contents, mode=mode, language=self._scramble_language(language)) response = self._stub_v2.Parse(request, timeout=timeout) return ResultContext(response)
https://github.com/bblfsh/client-python/blob/815835d191d5e385973f3c685849cc3b46aa20a5/bblfsh/client.py#L56-L85
parse binary file to custom class
python
def is_binary(f): """Return True if binary mode.""" # NOTE: order matters here. We don't bail on Python 2 just yet. Both # codecs.open() and io.open() can open in text mode, both set the encoding # attribute. We must do that check first. # If it has a decoding attribute with a value, it is text mode. if getattr(f, "encoding", None): return False # Python 2 makes no further distinction. if not PY3: return True # If the file has a mode, and it contains b, it is binary. try: if 'b' in getattr(f, 'mode', ''): return True except TypeError: import gzip if isinstance(f, gzip.GzipFile): return True # in gzip mode is an integer raise # Can we sniff? try: f.seek(0, os.SEEK_CUR) except (AttributeError, IOError): return False # Finally, let's sniff by reading a byte. byte = f.read(1) f.seek(-1, os.SEEK_CUR) return hasattr(byte, 'decode')
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L329-L362
parse binary file to custom class
python
def _select_binary_stream(self, name, urls): """Download a file from a list of urls, yielding a stream after downloading the file. URLs are tried in order until they succeed. :raises: :class:`BinaryToolFetcher.BinaryNotFound` if requests to all the given urls fail. """ downloaded_successfully = False accumulated_errors = [] for url in OrderedSet(urls): # De-dup URLS: we only want to try each URL once. logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url)) try: with temporary_file() as dest: logger.debug("in BinaryToolFetcher: url={}, timeout_secs={}" .format(url, self._timeout_secs)) self._fetcher.download(url, listener=Fetcher.ProgressListener(), path_or_fd=dest, timeout_secs=self._timeout_secs) logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url)) downloaded_successfully = True dest.seek(0) yield dest break except (IOError, Fetcher.Error, ValueError) as e: accumulated_errors.append('Failed to fetch binary from {url}: {error}' .format(url=url, error=e)) if not downloaded_successfully: raise self.BinaryNotFound(name, accumulated_errors)
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/binaries/binary_util.py#L195-L223
parse binary file to custom class
python
def parse(self): """ Parse file specified by constructor. """ f = open(self.parse_log_path, "r") self.parse2(f) f.close()
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-r/scripts/build_dataset_from_parse_log.py#L100-L106
parse binary file to custom class
python
def get_binary(self): """Return a binary buffer containing the file content""" content_disp = 'Content-Disposition: form-data; name="file"; filename="{}"' stream = io.BytesIO() stream.write(_string_to_binary('--{}'.format(self.boundary))) stream.write(_crlf()) stream.write(_string_to_binary(content_disp.format(self.file_name))) stream.write(_crlf()) stream.write(_crlf()) stream.write(self.body) stream.write(_crlf()) stream.write(_string_to_binary('--{}--'.format(self.boundary))) stream.write(_crlf()) return stream.getvalue()
https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/api_client.py#L260-L276
parse binary file to custom class
python
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value): """Parses a binary data value as string Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. binary_data_value (bytes): binary data value (CSSM_DB_ATTRIBUTE_FORMAT_BLOB) Returns: str: binary data value formatted as a string or None if no string could be extracted or binary data value is None (NULL). """ if not binary_data_value: return None try: return binary_data_value.decode('utf-8') except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'invalid binary data string value: {0:s}'.format( repr(binary_data_value))) return None
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mac_keychain.py#L735-L757
parse binary file to custom class
python
def get_binary_stream(name): """Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'`` """ opener = binary_streams.get(name) if opener is None: raise TypeError('Unknown standard stream %r' % name) return opener()
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/utils.py#L264-L277
parse binary file to custom class
python
def binary_regex(self): """Return the regex for the binary.""" regex_base_name = (r'^(%(STUB_NEW)s|%(BINARY_NAME)s-.*\.%(LOCALE)s\.%(PLATFORM)s)') regex_suffix = {'linux': r'.*\.%(EXT)s$', 'linux64': r'.*\.%(EXT)s$', 'mac': r'.*\.%(EXT)s$', 'mac64': r'.*\.%(EXT)s$', 'win32': r'(\.installer%(STUB)s)?\.%(EXT)s$', 'win64': r'(\.installer%(STUB)s)?\.%(EXT)s$'} regex = regex_base_name + regex_suffix[self.platform] return regex % {'BINARY_NAME': APPLICATIONS_TO_BINARY_NAME.get(self.application, self.application), 'LOCALE': self.locale, 'PLATFORM': PLATFORM_FRAGMENTS[self.platform], 'STUB': '-stub' if self.is_stub_installer else '', 'STUB_NEW': 'setup' if self.is_stub_installer else '', 'EXT': self.extension}
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L815-L833
parse binary file to custom class
python
def _parse_from_file(self, file_path): """ см. описание _parse_from_text. Только на вход подаётся не текст, а путь к файлу. """ file_path = abspath(file_path) if not isfile(file_path): raise MarkovTextExcept("Передан не файл.") with open(file_path, "rb") as txt_file: for line in txt_file: text = line.decode("utf-8", "ignore").strip() if not text: continue yield from self._parse_from_text(text)
https://github.com/NyashniyVladya/MarkovTextGenerator/blob/3d90e02a507939709773ef01c7ff3ec68b2b8d4b/MarkovTextGenerator/markov_text_generator.py#L382-L395
parse binary file to custom class
python
def parse_file(filename): """ Convenience method to parse a generic volumetric data file in the vasp like format. Used by subclasses for parsing file. Args: filename (str): Path of file to parse Returns: (poscar, data) """ poscar_read = False poscar_string = [] dataset = [] all_dataset = [] # for holding any strings in input that are not Poscar # or VolumetricData (typically augmentation charges) all_dataset_aug = {} dim = None dimline = None read_dataset = False ngrid_pts = 0 data_count = 0 poscar = None with zopen(filename, "rt") as f: for line in f: original_line = line line = line.strip() if read_dataset: toks = line.split() for tok in toks: if data_count < ngrid_pts: # This complicated procedure is necessary because # vasp outputs x as the fastest index, followed by y # then z. x = data_count % dim[0] y = int(math.floor(data_count / dim[0])) % dim[1] z = int(math.floor(data_count / dim[0] / dim[1])) dataset[x, y, z] = float(tok) data_count += 1 if data_count >= ngrid_pts: read_dataset = False data_count = 0 all_dataset.append(dataset) elif not poscar_read: if line != "" or len(poscar_string) == 0: poscar_string.append(line) elif line == "": poscar = Poscar.from_string("\n".join(poscar_string)) poscar_read = True elif not dim: dim = [int(i) for i in line.split()] ngrid_pts = dim[0] * dim[1] * dim[2] dimline = line read_dataset = True dataset = np.zeros(dim) elif line == dimline: # when line == dimline, expect volumetric data to follow # so set read_dataset to True read_dataset = True dataset = np.zeros(dim) else: # store any extra lines that were not part of the # volumetric data so we know which set of data the extra # lines are associated with key = len(all_dataset) - 1 if key not in all_dataset_aug: all_dataset_aug[key] = [] all_dataset_aug[key].append(original_line) if len(all_dataset) == 4: data = {"total": all_dataset[0], "diff_x": all_dataset[1], "diff_y": all_dataset[2], "diff_z": all_dataset[3]} data_aug = {"total": all_dataset_aug.get(0, None), 
"diff_x": all_dataset_aug.get(1, None), "diff_y": all_dataset_aug.get(2, None), "diff_z": all_dataset_aug.get(3, None)} # construct a "diff" dict for scalar-like magnetization density, # referenced to an arbitrary direction (using same method as # pymatgen.electronic_structure.core.Magmom, see # Magmom documentation for justification for this) # TODO: re-examine this, and also similar behavior in # Magmom - @mkhorton # TODO: does CHGCAR change with different SAXIS? diff_xyz = np.array([data["diff_x"], data["diff_y"], data["diff_z"]]) diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2])) ref_direction = np.array([1.01, 1.02, 1.03]) ref_sign = np.sign(np.dot(ref_direction, diff_xyz)) diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign) data["diff"] = diff.reshape((dim[0], dim[1], dim[2])) elif len(all_dataset) == 2: data = {"total": all_dataset[0], "diff": all_dataset[1]} data_aug = {"total": all_dataset_aug.get(0, None), "diff": all_dataset_aug.get(1, None)} else: data = {"total": all_dataset[0]} data_aug = {"total": all_dataset_aug.get(0, None)} return poscar, data, data_aug
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L2838-L2938
parse binary file to custom class
python
def parse(self, filename_or_file, initialize=True): """ Load manifest from file or file object """ if isinstance(filename_or_file, (str, unicode)): filename = filename_or_file else: filename = filename_or_file.name try: domtree = minidom.parse(filename_or_file) except xml.parsers.expat.ExpatError, e: args = [e.args[0]] if isinstance(filename, unicode): filename = filename.encode(sys.getdefaultencoding(), "replace") args.insert(0, '\n File "%s"\n ' % filename) raise ManifestXMLParseError(" ".join([str(arg) for arg in args])) if initialize: self.__init__() self.filename = filename self.load_dom(domtree, False)
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py#L702-L719
parse binary file to custom class
python
def bin_hex_type(arg): """An argparse type representing binary data encoded in hex.""" if re.match(r'^[a-f0-9]{2}(:[a-f0-9]{2})+$', arg, re.I): arg = arg.replace(':', '') elif re.match(r'^(\\x[a-f0-9]{2})+$', arg, re.I): arg = arg.replace('\\x', '') try: arg = binascii.a2b_hex(arg) except (binascii.Error, TypeError): raise argparse.ArgumentTypeError("{0} is invalid hex data".format(repr(arg))) return arg
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L86-L96
parse binary file to custom class
python
def parsefile(self, filename): """Parse from the file """ with open(filename, 'rb') as fd: return self.parse(fd.read())
https://github.com/lipixun/pymime/blob/4762cf2e51ba80c21d872f26b8e408b6a6863d26/src/mime/tools/specxmlparser.py#L44-L48
parse binary file to custom class
python
def _parse_from_file(self, filepath, fname, dependencies, recursive, greedy): """Parses the specified string to load the modules *from scratch* as opposed to loading pickled versions from the file cache.""" #Now that we have the file contents, we can parse them using the parsers string = self.tramp.read(filepath) pmodules = self.modulep.parse(string, self, filepath=filepath) file_mtime = self._get_mod_mtime(filepath) for module in pmodules: module.change_time = file_mtime self.modules[module.name.lower()] = module self._modulefiles[fname].append(module.name.lower()) pprograms = self.modulep.parse(string, self, False) for program in pprograms: program.change_time = file_mtime self.programs[program.name.lower()] = program self._programfiles[fname].append(program.name.lower()) #There may be xml files for the docstrings that also need to be parsed. self._parse_docstrings(filepath) return (pmodules, pprograms)
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/code.py#L148-L171
parse binary file to custom class
python
def parse_file(self, filename, encoding=None, debug=False): """Parse a file and return the syntax tree.""" stream = codecs.open(filename, "r", encoding) try: return self.parse_stream(stream, debug) finally: stream.close()
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pgen2/driver.py#L95-L101
parse binary file to custom class
python
def parse(cls, filename, root=None): """Parses the file at filename and returns a PythonFile. If root is specified, it will open the file with root prepended to the path. The idea is to allow for errors to contain a friendlier file path than the full absolute path. """ if root is not None: if os.path.isabs(filename): raise ValueError("filename must be a relative path if root is specified") full_filename = os.path.join(root, filename) else: full_filename = filename with io.open(full_filename, 'rb') as fp: blob = fp.read() tree = cls._parse(blob, filename) return cls(blob=blob, tree=tree, root=root, filename=filename)
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/python/src/python/pants/contrib/python/checks/checker/common.py#L128-L145
parse binary file to custom class
python
def parse_filespec(fspec, sep=':', gpat='*'): """ Parse given filespec `fspec` and return [(filetype, filepath)]. Because anyconfig.load should find correct file's type to load by the file extension, this function will not try guessing file's type if not file type is specified explicitly. :param fspec: filespec :param sep: a char separating filetype and filepath in filespec :param gpat: a char for glob pattern >>> parse_filespec("base.json") [('base.json', None)] >>> parse_filespec("json:base.json") [('base.json', 'json')] >>> parse_filespec("yaml:foo.yaml") [('foo.yaml', 'yaml')] >>> parse_filespec("yaml:foo.dat") [('foo.dat', 'yaml')] TODO: Allow '*' (glob pattern) in filepath when escaped with '\\', etc. # >>> parse_filespec("yaml:bar/*.conf") # [('bar/a.conf', 'yaml'), ('bar/b.conf', 'yaml')] """ if sep in fspec: tpl = (ftype, fpath) = tuple(fspec.split(sep)) else: tpl = (ftype, fpath) = (None, fspec) return [(fs, ftype) for fs in sorted(glob.glob(fpath))] \ if gpat in fspec else [flip(tpl)]
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/utils.py#L125-L156
parse binary file to custom class
python
def parse_file(self, inputstring, addhash=True): """Parse file code.""" if addhash: use_hash = self.genhash(False, inputstring) else: use_hash = None return self.parse(inputstring, self.file_parser, {"nl_at_eof_check": True}, {"header": "file", "use_hash": use_hash})
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1961-L1967
parse binary file to custom class
python
def gen_binary_files_from_urls( urls: Iterable[str], on_disk: bool = False, show_info: bool = True) -> Generator[BinaryIO, None, None]: """ Generate binary files from a series of URLs (one per URL). Args: urls: iterable of URLs on_disk: if ``True``, yields files that are on disk (permitting random access); if ``False``, yields in-memory files (which will not permit random access) show_info: show progress to the log? Yields: files, each of type :class:`BinaryIO` """ for url in urls: if on_disk: # Necessary for e.g. zip processing (random access) with tempfile.TemporaryDirectory() as tmpdir: filename = os.path.join(tmpdir, "tempfile") download(url=url, filename=filename) with open(filename, 'rb') as f: yield f else: if show_info: log.info("Reading from URL: {}", url) with urllib.request.urlopen(url) as f: yield f if show_info: log.info("... finished reading from URL: {}", url)
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/network.py#L134-L166
parse binary file to custom class
python
def parse_bss(bss): """Parse data prepared by nla_parse() and nla_parse_nested() into Python-friendly formats. Automatically chooses the right data-type for each attribute and converts it into Python integers, strings, unicode, etc objects. Positional arguments: bss -- dictionary with integer keys and nlattr values. Returns: New dictionary with the same integer keys and converted values. Excludes null/empty data from `bss`. """ # First parse data into Python data types. Weed out empty values. intermediate = dict() _get(intermediate, bss, 'NL80211_BSS_BSSID', libnl.attr.nla_data) # MAC address of access point. _get(intermediate, bss, 'NL80211_BSS_FREQUENCY', libnl.attr.nla_get_u32) # Frequency in MHz. _get(intermediate, bss, 'NL80211_BSS_TSF', libnl.attr.nla_get_msecs) # Timing Synchronization Function. _get(intermediate, bss, 'NL80211_BSS_BEACON_INTERVAL', libnl.attr.nla_get_u16) _get(intermediate, bss, 'NL80211_BSS_CAPABILITY', libnl.attr.nla_get_u16) _get(intermediate, bss, 'NL80211_BSS_INFORMATION_ELEMENTS', libnl.attr.nla_data) _get(intermediate, bss, 'NL80211_BSS_SIGNAL_MBM', libnl.attr.nla_get_u32) _get(intermediate, bss, 'NL80211_BSS_SIGNAL_UNSPEC', libnl.attr.nla_get_u8) _get(intermediate, bss, 'NL80211_BSS_STATUS', libnl.attr.nla_get_u32) _get(intermediate, bss, 'NL80211_BSS_SEEN_MS_AGO', libnl.attr.nla_get_u32) _get(intermediate, bss, 'NL80211_BSS_BEACON_IES', libnl.attr.nla_data) # Parse easy data into final Python types. 
parsed = dict() if 'bssid' in intermediate: parsed['bssid'] = ':'.join(format(x, '02x') for x in intermediate['bssid'][:6]) if 'frequency' in intermediate: parsed['frequency'] = intermediate['frequency'] if 'tsf' in intermediate: parsed['tsf'] = timedelta(microseconds=intermediate['tsf']) if 'beacon_interval' in intermediate: parsed['beacon_interval'] = intermediate['beacon_interval'] if 'signal_mbm' in intermediate: data_u32 = intermediate['signal_mbm'] data_s32 = -(data_u32 & 0x80000000) + (data_u32 & 0x7fffffff) parsed['signal_mbm'] = data_s32 / 100.0 if 'signal_unspec' in intermediate: parsed['signal_unspec'] = intermediate['signal_unspec'] / 100.0 if 'seen_ms_ago' in intermediate: parsed['seen_ms_ago'] = timedelta(milliseconds=intermediate['seen_ms_ago']) # Handle status. if intermediate.get('status') == nl80211.NL80211_BSS_STATUS_AUTHENTICATED: parsed['status'] = 'authenticated' elif intermediate.get('status') == nl80211.NL80211_BSS_STATUS_ASSOCIATED: parsed['status'] = 'associated' elif intermediate.get('status') == nl80211.NL80211_BSS_STATUS_IBSS_JOINED: parsed['status'] = 'joined' elif 'status' in intermediate: parsed['status'] = 'unknown status: {0}'.format(intermediate['status']) # Handle capability. 
if 'capability' in intermediate: # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1479 data = intermediate['capability'] list_of_caps = list() if parsed['frequency'] > 45000: if data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_AP: list_of_caps.append('DMG_ESS') elif data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_PBSS: list_of_caps.append('DMG_PCP') elif data & iw_scan.WLAN_CAPABILITY_DMG_TYPE_MASK == iw_scan.WLAN_CAPABILITY_DMG_TYPE_IBSS: list_of_caps.append('DMG_IBSS') if data & iw_scan.WLAN_CAPABILITY_DMG_CBAP_ONLY: list_of_caps.append('CBAP_Only') if data & iw_scan.WLAN_CAPABILITY_DMG_CBAP_SOURCE: list_of_caps.append('CBAP_Src') if data & iw_scan.WLAN_CAPABILITY_DMG_PRIVACY: list_of_caps.append('Privacy') if data & iw_scan.WLAN_CAPABILITY_DMG_ECPAC: list_of_caps.append('ECPAC') if data & iw_scan.WLAN_CAPABILITY_DMG_SPECTRUM_MGMT: list_of_caps.append('SpectrumMgmt') if data & iw_scan.WLAN_CAPABILITY_DMG_RADIO_MEASURE: list_of_caps.append('RadioMeasure') else: if data & iw_scan.WLAN_CAPABILITY_ESS: list_of_caps.append('ESS') if data & iw_scan.WLAN_CAPABILITY_IBSS: list_of_caps.append('IBSS') if data & iw_scan.WLAN_CAPABILITY_CF_POLLABLE: list_of_caps.append('CfPollable') if data & iw_scan.WLAN_CAPABILITY_CF_POLL_REQUEST: list_of_caps.append('CfPollReq') if data & iw_scan.WLAN_CAPABILITY_PRIVACY: list_of_caps.append('Privacy') if data & iw_scan.WLAN_CAPABILITY_SHORT_PREAMBLE: list_of_caps.append('ShortPreamble') if data & iw_scan.WLAN_CAPABILITY_PBCC: list_of_caps.append('PBCC') if data & iw_scan.WLAN_CAPABILITY_CHANNEL_AGILITY: list_of_caps.append('ChannelAgility') if data & iw_scan.WLAN_CAPABILITY_SPECTRUM_MGMT: list_of_caps.append('SpectrumMgmt') if data & iw_scan.WLAN_CAPABILITY_QOS: list_of_caps.append('QoS') if data & iw_scan.WLAN_CAPABILITY_SHORT_SLOT_TIME: list_of_caps.append('ShortSlotTime') if data & iw_scan.WLAN_CAPABILITY_APSD: list_of_caps.append('APSD') 
if data & iw_scan.WLAN_CAPABILITY_RADIO_MEASURE: list_of_caps.append('RadioMeasure') if data & iw_scan.WLAN_CAPABILITY_DSSS_OFDM: list_of_caps.append('DSSS-OFDM') if data & iw_scan.WLAN_CAPABILITY_DEL_BACK: list_of_caps.append('DelayedBACK') if data & iw_scan.WLAN_CAPABILITY_IMM_BACK: list_of_caps.append('ImmediateBACK') parsed['capability'] = list_of_caps # Handle (beacon) information elements. for k in ('information_elements', 'beacon_ies'): if k not in intermediate: continue parsed[k] = iw_scan.get_ies(intermediate[k]) # Make some data more human-readable. parsed['signal'] = parsed.get('signal_mbm', parsed.get('signal_unspec')) parsed['channel'] = _fetch(parsed, 'DS Parameter set') parsed['ssid'] = _fetch(parsed, 'SSID') or _fetch(parsed, 'MESH ID') or '' parsed['supported_rates'] = _fetch(parsed, 'Supported rates') parsed['extended_supported_rates'] = _fetch(parsed, 'Extended supported rates') parsed['channel_width'] = _fetch(parsed, 'HT operation', 'STA channel width') return parsed
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/helpers.py#L48-L175
parse binary file to custom class
python
def compile_binary(source): """ Prepare chkrootkit binary $ tar xzvf chkrootkit.tar.gz $ cd chkrootkit-0.52 $ make sense sudo mv chkrootkit-0.52 /usr/local/chkrootkit sudo ln -s """ cmd = 'make sense' src = '/usr/local/bin/chkrootkit' dst = '/usr/local/chkrootkit/chkrootkit' # Tar Extraction t = tarfile.open(source, 'r') t.extractall(TMPDIR) if isinstance(t.getnames(), list): extract_dir = t.getnames()[0].split('/')[0] os.chdir(TMPDIR + '/' + extract_dir) logger.info('make output: \n%s' % subprocess.getoutput(cmd)) # move directory in place mv_cmd = 'sudo mv %s /usr/local/chkrootkit' % (TMPDIR + '/' + extract_dir) subprocess.getoutput(mv_cmd) # create symlink to binary in directory os.symlink(dst, src) return True return False
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/chk_standalone.py#L45-L70
parse binary file to custom class
python
def parse(ifp, pb_cls, **kwargs): """Parse a stream. Args: ifp (string or file-like object): input stream. pb_cls (protobuf.message.Message.__class__): The class object of the protobuf message type encoded in the stream. """ mode = 'rb' if isinstance(ifp, str): istream = open(ifp, mode=mode, **kwargs) else: istream = open(fileobj=ifp, mode=mode, **kwargs) with istream: for data in istream: pb_obj = pb_cls() pb_obj.ParseFromString(data) yield pb_obj
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L19-L36
parse binary file to custom class
python
def parse_args(): """Parses the arguments and options.""" parser = argparse.ArgumentParser( prog="geneparse-indexer", description="Genotype file indexer." ) # IMPUTE2 files group = parser.add_argument_group("IMPUTE2 index") group.add_argument( "--impute2", metavar="IMPUTE2", type=str, nargs="+", help="Index an IMPUTE2 genotype file format. The file can be plain " "text or bgzipped.", ) # BGEN files group = parser.add_argument_group("BGEN index") group.add_argument( "--bgen", metavar="BGEN", type=str, nargs="+", help="Index a BGEN genotype file. This requires 'bgenix' to be in the " "PATH.", ) group.add_argument( "--legacy", action="store_true", help="Index the file using the '-with-rowid' option. This flag " "enables compatibility with SQLITE prior to version 3.8.2. See " "https://bitbucket.org/gavinband/bgen/wiki/bgenix for more " "information.", ) return parser.parse_args()
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/__main__.py#L94-L124
parse binary file to custom class
python
def _parse_file(cls, value): """Represents value as a string, allowing including text from nearest files using `file:` directive. Directive is sandboxed and won't reach anything outside directory with setup.py. Examples: file: README.rst, CHANGELOG.md, src/file.txt :param str value: :rtype: str """ include_directive = 'file:' if not isinstance(value, string_types): return value if not value.startswith(include_directive): return value spec = value[len(include_directive):] filepaths = (os.path.abspath(path.strip()) for path in spec.split(',')) return '\n'.join( cls._read_file(path) for path in filepaths if (cls._assert_local(path) or True) and os.path.isfile(path) )
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/config.py#L270-L298
parse binary file to custom class
python
def compile_binary(source): """ Prepare chkrootkit binary $ tar xzvf chkrootkit.tar.gz $ cd chkrootkit-0.52 $ make sense sudo mv chkrootkit-0.52 /usr/local/chkrootkit sudo ln -s """ cmd = 'make sense' slink = '/usr/local/bin/chkrootkit' target = '/usr/local/chkrootkit/chkrootkit' # Tar Extraction t = tarfile.open(source, 'r') t.extractall(TMPDIR) if isinstance(t.getnames(), list): extract_dir = t.getnames()[0].split('/')[0] os.chdir(TMPDIR + '/' + extract_dir) logger.info('make output: \n%s' % subprocess.getoutput(cmd)) # move directory in place os.rename(TMPDIR + '/' + extract_dir, 'usr/local/chkrootkit') # create symlink to binary in directory os.symlink(target, slink) return True return False
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/chkrootkit.py#L46-L70
parse binary file to custom class
python
def fetch_binary(self, fetch_request): """Fulfill a binary fetch request.""" bootstrap_dir = os.path.realpath(os.path.expanduser(self._bootstrap_dir)) bootstrapped_binary_path = os.path.join(bootstrap_dir, fetch_request.download_path) logger.debug("bootstrapped_binary_path: {}".format(bootstrapped_binary_path)) file_name = fetch_request.file_name urls = fetch_request.urls if self._ignore_cached_download or not os.path.exists(bootstrapped_binary_path): self._do_fetch(bootstrapped_binary_path, file_name, urls) logger.debug('Selected {binary} binary bootstrapped to: {path}' .format(binary=file_name, path=bootstrapped_binary_path)) return bootstrapped_binary_path
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/binaries/binary_util.py#L231-L244
parse binary file to custom class
python
def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, _encode_filename(filename)).parse()
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/environment.py#L388-L390
parse binary file to custom class
python
def _parse (self): """Parse the BDF mime structure and record the locations of the binary blobs. Sets up various data fields in the BDFData object.""" feedparser = FeedParser (Message) binarychunks = {} sizeinfo = None headxml = None self.fp.seek (0, 0) while True: data = self.fp.readline () if not data: break feedparser.feed (data) skip = (data == '\n' and len (feedparser._msgstack) == 3 and feedparser._msgstack[-1].get_content_type () in ('application/octet-stream', 'binary/octet-stream')) if skip: # We just finished reading the headers for a huge binary blob. # Time to remember where the data chunk is and pretend it doesn't # exist. msg = feedparser._msgstack[-1] ident = msg['Content-Location'] assert ident.endswith ('.bin'), 'confusion #1 in hacky MIME parsing!' binarychunks[ident] = self.fp.tell () if sizeinfo is None: headxml, sizeinfo, tagpfx = _extract_size_info (feedparser) kind = ident.split ('/')[-1] assert kind in sizeinfo, 'no size info for binary chunk kind %s in MIME!' % kind self.fp.seek (sizeinfo[kind] + 1, 1) # skip ahead by data chunk size sample = self.fp.read (16) assert sample.startswith ('--MIME'), 'crap, unexpected chunk size in MIME parsing: %r' % sample self.fp.seek (-16, 1) # go back # check that two major kinds of data are read at least once if any([k.split('/')[3] == '3' for k in binarychunks.iterkeys()]): break if headxml is None: raise RuntimeError ('never found any binary data') self.mimemsg = feedparser.close () self.headxml = headxml self.sizeinfo = sizeinfo self.binarychunks = binarychunks headsize, intsize = self.calc_intsize() # Compute some miscellaneous parameters that we'll need. 
# self.n_integrations = len (self.mimemsg.get_payload ()) - 1 self.n_integrations = os.stat(self.fp.name).st_size/intsize self.n_antennas = int (headxml.find (tagpfx + nanttag).text) self.n_baselines = (self.n_antennas * (self.n_antennas - 1)) // 2 ds = headxml.find (tagpfx + dstag) nbb = 0 nspw = 0 nchan = 0 crosspolstr = None for bb in ds.findall (tagpfx + basebandtag): nbb += 1 for spw in bb.getchildren (): nspw += 1 nchan += int (spw.get ('numSpectralPoint')) if crosspolstr is None: crosspolstr = spw.get ('crossPolProducts') elif spw.get ('crossPolProducts') != crosspolstr: raise Exception ('can only handle spectral windows with identical cross pol products') self.n_basebands = nbb self.n_spws = nspw self.n_channels = nchan self.crosspols = crosspolstr.split () self.n_pols = len(self.crosspols) # if bdf info pkl not present, write it if os.path.exists(os.path.dirname(self.pklname)) and self.pklname and (not os.path.exists(self.pklname)): logger.info('Writing bdf pkl info to %s...' % (self.pklname)) with open(self.pklname,'wb') as pkl: # Compute some miscellaneous parameters that we'll need. pickle.dump( (self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols), pkl) return self
https://github.com/caseyjlaw/sdmreader/blob/b6c3498f1915138727819715ee00d2c46353382d/sdmreader/sdmreader.py#L351-L440
parse binary file to custom class
python
def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, encode_filename(filename)).parse()
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/environment.py#L495-L497
parse binary file to custom class
python
def parse(self, stream, parser=None): """Parse the given file using available `BaseParser` instances. Raises: TypeError: when the parser argument is not a string or None. ValueError: when the parser argument is a string that does not name a `BaseParser`. """ force, parsers = self._get_parsers(parser) try: stream.seek(0) lookup = stream.read(1024) stream.seek(0) except (io.UnsupportedOperation, AttributeError): lookup = None for p in parsers: if p.hook(path=self.path, force=force, lookup=lookup): self.meta, self.terms, self.imports, self.typedefs = p.parse(stream) self._parsed_by = p.__name__ break
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L204-L226
parse binary file to custom class
python
def parse(): """Parses all the modules in the library specified by the script args. """ from fortpy.code import CodeParser c = CodeParser() if args["verbose"]: c.verbose = True f90files = {} c.scan_path(args["source"], f90files) for fname, fpath in f90files.items(): if fname not in c._modulefiles: c._modulefiles[fname] = [] c._parse_from_file(fpath, fname, args["recursive"], args["recursive"], False) return c
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/bestprac.py#L4-L20
parse binary file to custom class
python
def parse_file(self, name): """Parse the content of a file. See 'parse' method for information. :param name: the pathname of the file to parse :return: True on success (no error detected), False otherwise """ with open(name, "rb") as fp: return self.parse(fp.read())
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/parser.py#L443-L452
parse binary file to custom class
python
def generate_binary(outputfname, format_, progname='', binary_files=None, headless_binary_files=None):
    """Output the assembled memory image to *outputfname*.

    Supported formats: 'tap', 'tzx' (tape images, optionally with a BASIC
    loader) or raw binary for anything else.
    """
    global AUTORUN_ADDR

    org, binary = MEMORY.dump()
    if gl.has_errors:
        return

    if binary_files is None:
        binary_files = []
    if headless_binary_files is None:
        headless_binary_files = []

    # Extra CODE blocks that carry a name header on tape.
    named_blocks = []
    for path in binary_files:
        with api.utils.open_file(path) as fh:
            named_blocks.append((os.path.basename(path), fh.read()))

    # Extra raw blocks written without a header.
    raw_blocks = []
    for path in headless_binary_files:
        with api.utils.open_file(path) as fh:
            raw_blocks.append(fh.read())

    if AUTORUN_ADDR is None:
        AUTORUN_ADDR = org  # default entry point is the load origin

    if not progname:
        progname = os.path.basename(outputfname)[:10]

    if OPTIONS.use_loader.value:
        import basic  # Minimalist basic tokenizer
        program = basic.Basic()
        if org > 16383:  # Only for zx48k: CLEAR if below 16383
            program.add_line([['CLEAR', org - 1]])
        program.add_line([['LOAD', '""', program.token('CODE')]])
        if OPTIONS.autorun.value:
            program.add_line([['RANDOMIZE', program.token('USR'), AUTORUN_ADDR]])
        else:
            program.add_line([['REM'], ['RANDOMIZE', program.token('USR'), AUTORUN_ADDR]])

    if format_ in ('tap', 'tzx'):
        tape = {'tap': outfmt.TAP, 'tzx': outfmt.TZX}[format_]()
        if OPTIONS.use_loader.value:
            tape.save_program('loader', program.bytes, line=1)  # Put line 0 to protect against MERGE
        tape.save_code(progname, org, binary)
        for block_name, block in named_blocks:
            tape.save_code(block_name, 0, block)
        for block in raw_blocks:
            tape.standard_block(block)
        tape.dump(outputfname)
    else:
        # Plain binary dump with no tape framing.
        with open(outputfname, 'wb') as fh:
            fh.write(bytearray(binary))
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L1442-L1504
parse binary file to custom class
python
def parse(self, file_name):
    """Parse an entire file and return the resulting object.

    :param file_name: File path
    :type file_name: str
    :return: Parsed object
    """
    self.object = self.parsed_class()
    with open(file_name, encoding='utf-8') as stream:
        content = stream.read()
    self.parse_str(content)
    return self.object
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/parsers.py#L95-L106
parse binary file to custom class
python
def write_binary(filename, data):
    """Write *data* (bytes) to *filename*, creating parent directories as needed.

    :param filename: destination path; parent directories are created if missing
    :param data: binary payload to write

    Fixes over the previous version:
    - no longer shadows the ``dir`` builtin;
    - ``exist_ok=True`` removes the exists/makedirs TOCTOU race;
    - a bare filename (empty dirname) no longer crashes in ``makedirs("")``.
    """
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(filename, 'wb') as f:
        f.write(data)
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L22-L28