Columns: code (string, lengths 59 to 4.4k); docstring (string, lengths 5 to 7.69k)
def subdivide(self, points_per_edge):
    if len(self.coords) <= 1 or points_per_edge < 1:
        return self.deepcopy()
    coords = interpolate_points(self.coords, nb_steps=points_per_edge, closed=False)
    return self.deepcopy(coords=coords)
Adds ``N`` interpolated points with uniform spacing to each edge.

For each edge between points ``A`` and ``B`` this adds points at
``A + (i/(1+N)) * (B - A)``, where ``i`` is the index of the added point
and ``N`` is the number of points to add per edge.

Calling this method with ``points_per_edge=1`` two times splits each edge at its
center and then splits each newly created edge at its center again; this is
equivalent to a single call to ``subdivide(3)``.

Parameters
----------
points_per_edge : int
    Number of points to interpolate on each edge.

Returns
-------
LineString
    Line string with subdivided edges.
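A minimal numpy sketch of the per-edge interpolation rule described above (standalone, not the library's own ``interpolate_points`` helper):

import numpy as np

# One edge from A to B, subdivided with N = 3 points per edge.
A, B = np.array([0.0, 0.0]), np.array([4.0, 0.0])
N = 3
added = [A + (i / (1 + N)) * (B - A) for i in range(1, N + 1)]
print(added)  # [array([1., 0.]), array([2., 0.]), array([3., 0.])]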
def do_notify(context, event_type, payload):
    LOG.debug('IP_BILL: notifying {}'.format(payload))
    notifier = n_rpc.get_notifier('network')
    notifier.info(context, event_type, payload)
Generic notifier.

Parameters:
- `context`: session context
- `event_type`: the event type to report, e.g. ``ip.usage``
- `payload`: dict containing the payload to send
def find_working_password(self, usernames=None, host_strings=None): r = self.local_renderer if host_strings is None: host_strings = [] if not host_strings: host_strings.append(self.genv.host_string) if usernames is None: usernames = [] if not usernames: usernames.append(self.genv.user) for host_string in host_strings: for username in usernames: passwords = [] passwords.append(self.genv.user_default_passwords[username]) passwords.append(self.genv.user_passwords[username]) passwords.append(self.env.default_password) for password in passwords: with settings(warn_only=True): r.env.host_string = host_string r.env.password = password r.env.user = username ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True) if ret.return_code in (1, 6) or 'hello' in ret: return host_string, username, password raise Exception('No working login found.')
Returns the first working combination of username and password for the current host.
def on_goto_out_of_doc(self, assignment):
    editor = self.open_file(assignment.module_path)
    if editor:
        TextHelper(editor).goto_line(assignment.line, assignment.column)
Open a new tab when a goto operation jumps outside of the current document.

:param assignment: Destination
def _bind_success(self, stanza):
    payload = stanza.get_payload(ResourceBindingPayload)
    jid = payload.jid
    if not jid:
        raise BadRequestProtocolError(u"<jid/> element missing in"
                                      " the bind response")
    self.stream.me = jid
    self.stream.event(AuthorizedEvent(self.stream.me))
Handle resource binding success. [initiating entity only] :Parameters: - `stanza`: <iq type="result"/> stanza received. Set `streambase.StreamBase.me` to the full JID negotiated.
def add(name, path, branch, type): if not name and not path: ctx = click.get_current_context() click.echo(ctx.get_help()) examples = ( '\nExamples:\n' ' cpenv module add my_module ./path/to/my_module\n' ' cpenv module add my_module git@github.com:user/my_module.git' ' cpenv module add my_module git@github.com:user/my_module.git --branch=master --type=shared' ) click.echo(examples) return if not name: click.echo('Missing required argument: name') return if not path: click.echo('Missing required argument: path') env = cpenv.get_active_env() if type=='local': if not env: click.echo('\nActivate an environment to add a local module.\n') return if click.confirm('\nAdd {} to active env {}?'.format(name, env.name)): click.echo('Adding module...', nl=False) try: env.add_module(name, path, branch) except: click.echo(bold_red('FAILED')) raise else: click.echo(bold_green('OK!')) return module_paths = cpenv.get_module_paths() click.echo('\nAvailable module paths:\n') for i, mod_path in enumerate(module_paths): click.echo(' {}. {}'.format(i, mod_path)) choice = click.prompt( 'Where do you want to add your module?', type=int, default=0 ) module_root = module_paths[choice] module_path = utils.unipath(module_root, name) click.echo('Creating module {}...'.format(module_path), nl=False) try: cpenv.create_module(module_path, path, branch) except: click.echo(bold_red('FAILED')) raise else: click.echo(bold_green('OK!'))
Add a module to an environment. PATH can be a git repository path or a filesystem path.
def add_prepare_handler(self, prepare_handlers): if not isinstance(prepare_handlers, static_bundle.BUNDLE_ITERABLE_TYPES): prepare_handlers = [prepare_handlers] if self.prepare_handlers_chain is None: self.prepare_handlers_chain = [] for handler in prepare_handlers: self.prepare_handlers_chain.append(handler)
Add prepare handler to the bundle.

:type prepare_handlers: static_bundle.handlers.AbstractPrepareHandler
def _calculate_distance(latlon1, latlon2):
    lat1, lon1 = latlon1
    lat2, lon2 = latlon2
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    R = 6371
    a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon / 2))**2
    c = 2 * np.pi * R * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) / 180
    return c
Calculates the distance between two points on earth.
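For comparison, a minimal textbook haversine sketch (an illustrative standalone function, assuming latitude/longitude in degrees and Earth radius in kilometres); the function above is kept exactly as it appears in the source.

import numpy as np

def haversine_km(latlon1, latlon2, radius_km=6371.0):
    # Convert to radians, then apply the standard haversine formula.
    lat1, lon1 = np.radians(latlon1)
    lat2, lon2 = np.radians(latlon2)
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return 2 * radius_km * np.arcsin(np.sqrt(a))

print(round(haversine_km((52.52, 13.405), (48.8566, 2.3522))))  # 878 (Berlin to Paris, km)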
def get_channel_classes(channel_code): if channel_code: channel_code = channel_code.upper() if channel_code not in INTEGRATED_CHANNEL_CHOICES: raise CommandError(_('Invalid integrated channel: {channel}').format(channel=channel_code)) channel_classes = [INTEGRATED_CHANNEL_CHOICES[channel_code]] else: channel_classes = INTEGRATED_CHANNEL_CHOICES.values() return channel_classes
Assemble a list of integrated channel classes to transmit to. If a valid channel type was provided, use it. Otherwise, use all the available channel types.
def check_confirmations_or_resend(self, use_open_peers=False, **kw):
    if self.confirmations() == 0:
        self.send(use_open_peers, **kw)
Check if a tx is confirmed, else resend it.

:param use_open_peers: select random peers from the api/peers endpoint
def _peekNextID(self, conn=None):
    if conn is None:
        conn = self._get_connection()
    return to_unicode(conn.get(self._get_next_id_key()) or 0)
_peekNextID - Look at, but don't increment the primary key for this model. Internal. @return int - next pk
def _map(self, event): description = event.get('description', '') start_time = google_base.parse_rfc3339_utc_string( event.get('timestamp', '')) for name, regex in _EVENT_REGEX_MAP.items(): match = regex.match(description) if match: return {'name': name, 'start-time': start_time}, match return {'name': description, 'start-time': start_time}, None
Extract elements from an operation event and map to a named event.
def update(self, fp): if 'b' not in fp.mode: raise ValueError("File has to be opened in binary mode.") url = self._upload_url if fp.peek(1): response = self._put(url, data=fp) else: response = self._put(url, data=b'') if response.status_code != 200: msg = ('Could not update {} (status ' 'code: {}).'.format(self.path, response.status_code)) raise RuntimeError(msg)
Update the remote file from a local file. Pass in a filepointer `fp` that has been opened for writing in binary mode.
def _qsub_block(self, output_dir, error_dir, tid_specs): processes = [] job_names = [] for (tid, spec) in tid_specs: job_name = "%s_%s_tid_%d" % (self.batch_name, self.job_timestamp, tid) job_names.append(job_name) cmd_args = self.command( self.command._formatter(spec), tid, self._launchinfo) popen_args = self._qsub_args([("-e",error_dir), ('-N',job_name), ("-o",output_dir)], cmd_args) p = subprocess.Popen(popen_args, stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() self.debug(stdout) if p.poll() != 0: raise EnvironmentError("qsub command exit with code: %d" % p.poll()) processes.append(p) self.message("Invoked qsub for %d commands" % len(processes)) if (self.reduction_fn is not None) or self.dynamic: self._qsub_collate_and_launch(output_dir, error_dir, job_names)
This method handles static argument specifiers and cases where the dynamic specifiers cannot be queued before the arguments are known.
def initialize_dictionaries(self, e_set, max_feats2 = 200): if(hasattr(e_set, '_type')): if(e_set._type == "train"): nvocab = util_functions.get_vocab(e_set._text, e_set._score, max_feats2 = max_feats2) svocab = util_functions.get_vocab(e_set._clean_stem_text, e_set._score, max_feats2 = max_feats2) self._normal_dict = CountVectorizer(ngram_range=(1,2), vocabulary=nvocab) self._stem_dict = CountVectorizer(ngram_range=(1,2), vocabulary=svocab) self.dict_initialized = True self._mean_spelling_errors=sum(e_set._spelling_errors)/float(len(e_set._spelling_errors)) self._spell_errors_per_character=sum(e_set._spelling_errors)/float(sum([len(t) for t in e_set._text])) good_pos_tags,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens) self._grammar_errors_per_character=(sum(good_pos_tags)/float(sum([len(t) for t in e_set._text]))) bag_feats=self.gen_bag_feats(e_set) f_row_sum=numpy.sum(bag_feats[:,:]) self._mean_f_prop=f_row_sum/float(sum([len(t) for t in e_set._text])) ret = "ok" else: raise util_functions.InputError(e_set, "needs to be an essay set of the train type.") else: raise util_functions.InputError(e_set, "wrong input. need an essay set object") return ret
Initializes dictionaries from an essay set object.

Dictionaries must be initialized prior to using this to extract features.

e_set is an input essay set.

Returns a confirmation of initialization.
def _symlink(path, link, overwrite=0, verbose=0): if exists(link) and not os.path.islink(link): if verbose: print('link location already exists') is_junc = _win32_is_junction(link) if os.path.isdir(link): if is_junc: pointed = _win32_read_junction(link) if path == pointed: if verbose: print('...and is a junction that points to the same place') return link else: if verbose: if not exists(pointed): print('...and is a broken junction that points somewhere else') else: print('...and is a junction that points somewhere else') else: if verbose: print('...and is an existing real directory!') raise IOError('Cannot overwrite a real directory') elif os.path.isfile(link): if _win32_is_hardlinked(link, path): if verbose: print('...and is a hard link that points to the same place') return link else: if verbose: print('...and is a hard link that points somewhere else') if _win32_can_symlink(): raise IOError('Cannot overwrite potentially real file if we can symlink') if overwrite: if verbose: print('...overwriting') util_io.delete(link, verbose > 1) else: if exists(link): raise IOError('Link already exists') _win32_symlink2(path, link, verbose=verbose)
Windows helper for ub.symlink
def write(self, data, sections=None): if self.error[0]: self.status = self.error[0] data = b(self.error[1]) if not self.headers_sent: self.send_headers(data, sections) if self.request_method != 'HEAD': try: if self.chunked: self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data))) else: self.conn.sendall(data) except socket.timeout: self.closeConnection = True except socket.error: self.closeConnection = True
Write the data to the output socket.
def nullspace(A, atol=1e-13, rtol=0):
    A = np.atleast_2d(A)
    u, s, vh = np.linalg.svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns
Compute an approximate basis for the nullspace of A. The algorithm used by this function is based on the singular value decomposition of `A`. Parameters ---------- A : numpy.ndarray A should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than `atol` are considered to be zero. rtol : float The relative tolerance. Singular values less than rtol*smax are considered to be zero, where smax is the largest singular value. If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than `tol` are considered to be zero. Returns ------- numpy.ndarray If `A` is an array with shape (m, k), then `ns` will be an array with shape (k, n), where n is the estimated dimension of the nullspace of `A`. The columns of `ns` are a basis for the nullspace; each element in numpy.dot(A, ns) will be approximately zero. Notes ----- Taken from the numpy cookbook.
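A quick sanity check of the routine above: for a rank-1 matrix with 3 columns the nullspace has dimension 2, and A.dot(ns) should be numerically zero.

import numpy as np

A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])      # rank 1
ns = nullspace(A)
print(ns.shape)                      # (3, 2)
print(np.allclose(A.dot(ns), 0.0))   # True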
def replaceIterationCycle(self, phaseSpecs):
    self.__phaseManager = _PhaseManager(
        model=self.__model,
        phaseSpecs=phaseSpecs)
    return
Replaces the Iteration Cycle phases :param phaseSpecs: Iteration cycle description consisting of a sequence of IterationPhaseSpecXXXXX elements that are performed in the given order
def Tliquidus(Tms=None, ws=None, xs=None, CASRNs=None,
              AvailableMethods=False, Method=None):
    def list_methods():
        methods = []
        if none_and_length_check([Tms]):
            methods.append('Maximum')
            methods.append('Simple')
        methods.append('None')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    if Method == 'Maximum':
        _Tliq = max(Tms)
    elif Method == 'Simple':
        _Tliq = mixing_simple(xs, Tms)
    elif Method == 'None':
        return None
    else:
        raise Exception('Failure in function')
    return _Tliq
This function handles the retrieval of a mixture's liquidus point.

This API is considered experimental, and is expected to be removed in a
future release in favor of a more complete object-oriented interface.

>>> Tliquidus(Tms=[250.0, 350.0], xs=[0.5, 0.5])
350.0
>>> Tliquidus(Tms=[250, 350], xs=[0.5, 0.5], Method='Simple')
300.0
>>> Tliquidus(Tms=[250, 350], xs=[0.5, 0.5], AvailableMethods=True)
['Maximum', 'Simple', 'None']
def delete_keys(self, *args, **kwargs): ikeys = iter(kwargs.get('keys', args[0] if args else [])) while True: try: key = ikeys.next() except StopIteration: break if isinstance(key, basestring): mimicdb.backend.srem(tpl.bucket % self.name, key) mimicdb.backend.delete(tpl.key % (self.name, key)) elif isinstance(key, BotoKey) or isinstance(key, Key): mimicdb.backend.srem(tpl.bucket % self.name, key.name) mimicdb.backend.delete(tpl.key % (self.name, key.name)) return super(Bucket, self).delete_keys(*args, **kwargs)
Remove each key or key name in an iterable from the bucket set.
def read_adc(self, channel, gain=1, data_rate=None):
    assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
    return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
Read a single ADC channel and return the ADC value as a signed integer result. Channel must be a value within 0-3.
def get_namespace_hash(self, hash_fn=hashlib.md5) -> str: m = hash_fn() if self.has_names: items = self._get_namespace_name_to_encoding(desc='getting hash').items() else: items = self._get_namespace_identifier_to_encoding(desc='getting hash').items() for name, encoding in items: m.update(f'{name}:{encoding}'.encode('utf8')) return m.hexdigest()
Get the namespace hash. Defaults to MD5.
def precision(ntp, nfp):
    if (ntp + nfp) > 0:
        return ntp/(ntp + nfp)
    else:
        return np.nan
This calculates precision. https://en.wikipedia.org/wiki/Precision_and_recall Parameters ---------- ntp : int The number of true positives. nfp : int The number of false positives. Returns ------- float The precision calculated using `ntp/(ntp + nfp)`.
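Usage is straightforward; the nan return guards the undefined case of zero positive predictions.

print(precision(ntp=8, nfp=2))   # 0.8
print(precision(ntp=0, nfp=0))   # nan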
def nmap_smb_vulnscan(): service_search = ServiceSearch() services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True) services = [service for service in services] service_dict = {} for service in services: service.add_tag('smb_vulnscan') service_dict[str(service.address)] = service nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ") if services: result = nmap(nmap_args, [str(s.address) for s in services]) parser = NmapParser() report = parser.parse_fromstring(result) smb_signing = 0 ms17 = 0 for nmap_host in report.hosts: for script_result in nmap_host.scripts_results: script_result = script_result.get('elements', {}) service = service_dict[str(nmap_host.address)] if script_result.get('message_signing', '') == 'disabled': print_success("({}) SMB Signing disabled".format(nmap_host.address)) service.add_tag('smb_signing_disabled') smb_signing += 1 if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE': print_success("({}) Vulnerable for MS17-010".format(nmap_host.address)) service.add_tag('MS17-010') ms17 += 1 service.update(tags=service.tags) print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.") stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)} Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats) else: print_notification("No services found to scan.")
Scans available smb services in the database for smb signing and ms17-010.
def resource(**kwargs): def inner(function): name = kwargs.pop('name', None) if name is None: name = utils.dasherize(function.__name__) methods = kwargs.pop('methods', None) if isinstance(methods, six.string_types): methods = methods, handler = (function, methods) if name not in _resources: _handlers[name] = [] from armet import resources kwargs['name'] = name class LightweightResource(resources.Resource): Meta = type(str('Meta'), (), kwargs) def route(self, request, response): for handler, methods in _handlers[name]: if methods is None or request.method in methods: return handler(request, response) resources.Resource.route(self) _resources[name] = LightweightResource _handlers[name].append(handler) return _resources[name] return inner
Wraps the decorated function in a lightweight resource.
def owsproxy_delegate(request): twitcher_url = request.registry.settings.get('twitcher.url') protected_path = request.registry.settings.get('twitcher.ows_proxy_protected_path', '/ows') url = twitcher_url + protected_path + '/proxy' if request.matchdict.get('service_name'): url += '/' + request.matchdict.get('service_name') if request.matchdict.get('access_token'): url += '/' + request.matchdict.get('service_name') url += '?' + urlparse.urlencode(request.params) LOGGER.debug("delegate to owsproxy: %s", url) resp = requests.request(method=request.method.upper(), url=url, data=request.body, headers=request.headers, verify=False) return Response(resp.content, status=resp.status_code, headers=resp.headers)
Delegates owsproxy request to external twitcher service.
async def update_read_timestamp(self, read_timestamp=None): if read_timestamp is None: read_timestamp = (self.events[-1].timestamp if self.events else datetime.datetime.now(datetime.timezone.utc)) if read_timestamp > self.latest_read_timestamp: logger.info( 'Setting {} latest_read_timestamp from {} to {}' .format(self.id_, self.latest_read_timestamp, read_timestamp) ) state = self._conversation.self_conversation_state state.self_read_state.latest_read_timestamp = ( parsers.to_timestamp(read_timestamp) ) try: await self._client.update_watermark( hangouts_pb2.UpdateWatermarkRequest( request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId( id=self.id_ ), last_read_timestamp=parsers.to_timestamp( read_timestamp ), ) ) except exceptions.NetworkError as e: logger.warning('Failed to update read timestamp: {}'.format(e)) raise
Update the timestamp of the latest event which has been read. This method will avoid making an API request if it will have no effect. Args: read_timestamp (datetime.datetime): (optional) Timestamp to set. Defaults to the timestamp of the newest event. Raises: .NetworkError: If the timestamp cannot be updated.
def draw(self):
    if self.enabled:
        self._vertex_list.colors = self._gl_colors
        self._vertex_list.vertices = self._gl_vertices
        self._vertex_list.draw(pyglet.gl.GL_TRIANGLES)
Draw the shape in the current OpenGL context.
def process(self, tensor): for processor in self.preprocessors: tensor = processor.process(tensor=tensor) return tensor
Process state. Args: tensor: tensor to process Returns: processed state
def same_log10_order_of_magnitude(x, delta=0.1):
    dmin = np.log10(np.min(x)*(1-delta))
    dmax = np.log10(np.max(x)*(1+delta))
    return np.floor(dmin) == np.floor(dmax)
Return true if range is approximately in same order of magnitude For example these sequences are in the same order of magnitude: - [1, 8, 5] # [1, 10) - [35, 20, 80] # [10 100) - [232, 730] # [100, 1000) Parameters ---------- x : array-like Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``. delta : float Fuzz factor for approximation. It is multiplicative.
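Two quick checks of the behaviour described above:

print(same_log10_order_of_magnitude([35, 80]))    # True  -- both fall in [10, 100)
print(same_log10_order_of_magnitude([35, 800]))   # False -- spans two decades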
def do_toggle_variables(self, action): self.show_vars = action.get_active() if self.show_vars: self.show_variables_window() else: self.hide_variables_window()
Widget Action to toggle showing the variables window.
def total_regular_pixels_from_mask(mask):
    total_regular_pixels = 0
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if not mask[y, x]:
                total_regular_pixels += 1
    return total_regular_pixels
Compute the total number of unmasked regular pixels in a mask.
def pop(self, nbytes): size = 0 popped = [] with self._lock_packets: while size < nbytes: try: packet = self._packets.pop(0) size += len(packet.data.data) self._remaining -= len(packet.data.data) popped.append(packet) except IndexError: break return popped
pops packets with _at least_ nbytes of payload
def aoi(self, **kwargs):
    g = self._parse_geoms(**kwargs)
    if g is None:
        return self
    else:
        return self[g]
Subsets the Image by the given bounds Args: bbox (list): optional. A bounding box array [minx, miny, maxx, maxy] wkt (str): optional. A WKT geometry string geojson (str): optional. A GeoJSON geometry dictionary Returns: image: an image instance of the same type
def initialize(self, config, context): self.logger.info("Initializing PulsarSpout with the following") self.logger.info("Component-specific config: \n%s" % str(config)) self.logger.info("Context: \n%s" % str(context)) self.emit_count = 0 self.ack_count = 0 self.fail_count = 0 if not PulsarSpout.serviceUrl in config or not PulsarSpout.topicName in config: self.logger.fatal("Need to specify both serviceUrl and topicName") self.pulsar_cluster = str(config[PulsarSpout.serviceUrl]) self.topic = str(config[PulsarSpout.topicName]) mode = config[api_constants.TOPOLOGY_RELIABILITY_MODE] if mode == api_constants.TopologyReliabilityMode.ATLEAST_ONCE: self.acking_timeout = 1000 * int(config[api_constants.TOPOLOGY_MESSAGE_TIMEOUT_SECS]) else: self.acking_timeout = 30000 if PulsarSpout.receiveTimeoutMs in config: self.receive_timeout_ms = config[PulsarSpout.receiveTimeoutMs] else: self.receive_timeout_ms = 10 if PulsarSpout.deserializer in config: self.deserializer = config[PulsarSpout.deserializer] if not callable(self.deserializer): self.logger.fatal("Pulsar Message Deserializer needs to be callable") else: self.deserializer = self.default_deserializer self.logConfFileName = GenerateLogConfig(context) self.logger.info("Generated LogConf at %s" % self.logConfFileName) self.client = pulsar.Client(self.pulsar_cluster, log_conf_file_path=self.logConfFileName) self.logger.info("Setup Client with cluster %s" % self.pulsar_cluster) try: self.consumer = self.client.subscribe(self.topic, context.get_topology_name(), consumer_type=pulsar.ConsumerType.Failover, unacked_messages_timeout_ms=self.acking_timeout) except Exception as e: self.logger.fatal("Pulsar client subscription failed: %s" % str(e)) self.logger.info("Subscribed to topic %s" % self.topic)
Implements Pulsar Spout's initialize method
def scan_mem(self, data_to_find): if isinstance(data_to_find, bytes): data_to_find = [bytes([c]) for c in data_to_find] for mapping in sorted(self.maps): for ptr in mapping: if ptr + len(data_to_find) >= mapping.end: break candidate = mapping[ptr:ptr + len(data_to_find)] if issymbolic(candidate[0]): break if candidate == data_to_find: yield ptr
Scan for concrete bytes in all mapped memory. Successively yield addresses of all matches. :param bytes data_to_find: String to locate :return:
def predict(self, x, *args, **kwargs):
    if len(args) > 0:
        if type(args[0]) == nx.Graph or type(args[0]) == nx.DiGraph:
            return self.orient_graph(x, *args, **kwargs)
        else:
            return self.predict_proba(x, *args, **kwargs)
    elif type(x) == DataFrame:
        return self.predict_dataset(x, *args, **kwargs)
    elif type(x) == Series:
        return self.predict_proba(x.iloc[0], x.iloc[1], *args, **kwargs)
Generic predict method that chooses the most suitable subfunction.

Depending on the type of `x` and of `*args`, this function proceeds to execute
different functions in the following priority order:

1. If ``args[0]`` is a ``networkx.(Di)Graph``, then ``self.orient_graph`` is executed.
2. If ``args[0]`` exists, then ``self.predict_proba`` is executed.
3. If ``x`` is a ``pandas.DataFrame``, then ``self.predict_dataset`` is executed.
4. If ``x`` is a ``pandas.Series``, then ``self.predict_proba`` is executed.

Args:
    x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.
    args (numpy.array or networkx.Graph): graph or second variable.

Returns:
    pandas.Dataframe or networkx.Digraph: predictions output
def CheckForHeaderGuard(filename, clean_lines, error): raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: if Search(r'//\s*NOLINT\(build/header_guard\)', i): return for i in raw_lines: if Search(r'^\s* return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = '' ifndef_linenum = 0 define = '' endif = '' endif_linenum = 0 for linenum, line in enumerate(raw_lines): linesplit = line.split() if len(linesplit) >= 2: if not ifndef and linesplit[0] == ' ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == ' define = linesplit[1] if line.startswith(' endif = line endif_linenum = linenum if not ifndef or not define or ifndef != define: error(filename, 0, 'build/header_guard', 5, 'No cppvar) return if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, ' ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) match = Match(r' if match: if match.group(1) == '_': error(filename, endif_linenum, 'build/header_guard', 0, ' return no_single_line_comments = True for i in xrange(1, len(raw_lines) - 1): line = raw_lines[i] if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): no_single_line_comments = False break if no_single_line_comments: match = Match(r' if match: if match.group(1) == '_': error(filename, endif_linenum, 'build/header_guard', 0, ' return error(filename, endif_linenum, 'build/header_guard', 5, '
Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found.
def get_all_files(folder):
    for path, dirlist, filelist in os.walk(folder):
        for fn in filelist:
            yield op.join(path, fn)
Generator that loops through all absolute paths of the files within folder Parameters ---------- folder: str Root folder start point for recursive search. Yields ------ fpath: str Absolute path of one file in the folders
def arcovar(x, order):
    from spectrum import corrmtx
    import scipy.linalg
    X = corrmtx(x, order, 'covariance')
    Xc = np.matrix(X[:, 1:])
    X1 = np.array(X[:, 0])
    a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)
    Cz = np.dot(X1.conj().transpose(), Xc)
    e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)
    assert e.imag < 1e-4, 'weird behaviour'
    e = float(e.real)
    return a, e
r"""Simple and fast implementation of the covariance AR estimate This code is 10 times faster than :func:`arcovar_marple` and more importantly only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple` :param array X: Array of complex data samples :param int oder: Order of linear prediction model :return: * a - Array of complex forward linear prediction coefficients * e - error The covariance method fits a Pth order autoregressive (AR) model to the input signal, which is assumed to be the output of an AR system driven by white noise. This method minimizes the forward prediction error in the least-squares sense. The output vector contains the normalized estimate of the AR system parameters The white noise input variance estimate is also returned. If is the power spectral density of y(n), then: .. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k-1}^P a(k)e^{-jwk}\right|^2} Because the method characterizes the input data using an all-pole model, the correct choice of the model order p is important. .. plot:: :width: 80% :include-source: from spectrum import arcovar, marple_data, arma2psd from pylab import plot, log10, linspace, axis ar_values, error = arcovar(marple_data, 15) psd = arma2psd(ar_values, sides='centerdc') plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd))) axis([-0.5, 0.5, -60, 0]) .. seealso:: :class:`pcovar` :validation: the AR parameters are the same as those returned by a completely different function :func:`arcovar_marple`. :References: [Mathworks]_
def write_bel_annotation(self, file: TextIO) -> None: if not self.is_populated(): self.populate() values = self._get_namespace_name_to_encoding(desc='writing names') write_annotation( keyword=self._get_namespace_keyword(), citation_name=self._get_namespace_name(), description='', values=values, file=file, )
Write as a BEL annotation file.
def predictions_and_gradient( self, image=None, label=None, strict=True, return_details=False): assert self.has_gradient() if image is None: image = self.__original_image if label is None: label = self.__original_class in_bounds = self.in_bounds(image) assert not strict or in_bounds self._total_prediction_calls += 1 self._total_gradient_calls += 1 predictions, gradient = self.__model.predictions_and_gradient(image, label) is_adversarial, is_best, distance = self.__is_adversarial( image, predictions, in_bounds) assert predictions.ndim == 1 assert gradient.shape == image.shape if return_details: return predictions, gradient, is_adversarial, is_best, distance else: return predictions, gradient, is_adversarial
Interface to model.predictions_and_gradient for attacks. Parameters ---------- image : `numpy.ndarray` Single input with shape as expected by the model (without the batch dimension). Defaults to the original image. label : int Label used to calculate the loss that is differentiated. Defaults to the original label. strict : bool Controls if the bounds for the pixel values should be checked.
def intensity_at_radius(self, radius):
    return self.intensity * np.exp(
        -self.sersic_constant * (((radius / self.effective_radius) ** (1. / self.sersic_index)) - 1))
Compute the intensity of the profile at a given radius. Parameters ---------- radius : float The distance from the centre of the profile.
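A self-contained sketch of the Sersic law used above, with made-up parameter values; by construction the profile returns exactly ``intensity`` at the effective radius.

import numpy as np

intensity, effective_radius, sersic_index = 1.0, 2.0, 4.0
sersic_constant = 7.669   # approximate value for sersic_index = 4

def sersic_intensity(radius):
    return intensity * np.exp(
        -sersic_constant * ((radius / effective_radius) ** (1.0 / sersic_index) - 1.0))

print(sersic_intensity(effective_radius))   # 1.0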
def get_self_user_id(session): response = make_get_request(session, 'self') if response.status_code == 200: return response.json()['result']['id'] else: raise UserIdNotRetrievedException( 'Error retrieving user id: %s' % response.text, response.text)
Get the currently authenticated user ID
def run(self): logger.info(u'Started listening') while not self._stop: xml = self._readxml() if xml is None: break if not self.modelize: logger.info(u'Raw xml: %s' % xml) self.results.put(xml) continue if xml.tag == 'RECOGOUT': sentence = Sentence.from_shypo(xml.find('SHYPO'), self.encoding) logger.info(u'Modelized recognition: %r' % sentence) self.results.put(sentence) else: logger.info(u'Unmodelized xml: %s' % xml) self.results.put(xml) logger.info(u'Stopped listening')
Start listening to the server
def contains(x): if isinstance(x, str): x = canonical_name(x) return x in _TO_COLOR_USER or x in _TO_COLOR else: x = tuple(x) return x in _TO_NAME_USER or x in _TO_NAME
Return true if this string or integer tuple appears in tables
def init(self, username, reponame, force, backend=None): key = self.key(username, reponame) server_repodir = self.server_rootdir(username, reponame, create=False) if os.path.exists(server_repodir) and not force: raise RepositoryExists() if os.path.exists(server_repodir): shutil.rmtree(server_repodir) os.makedirs(server_repodir) with cd(server_repodir): git.init(".", "--bare") if backend is not None: backend.init_repo(server_repodir) repodir = self.rootdir(username, reponame, create=False) if os.path.exists(repodir) and not force: raise Exception("Local repo already exists") if os.path.exists(repodir): shutil.rmtree(repodir) os.makedirs(repodir) with cd(os.path.dirname(repodir)): git.clone(server_repodir, '--no-hardlinks') url = server_repodir if backend is not None: url = backend.url(username, reponame) repo = Repo(username, reponame) repo.manager = self repo.remoteurl = url repo.rootdir = self.rootdir(username, reponame) self.add(repo) return repo
Initialize a Git repo Parameters ---------- username, reponame : Repo name is tuple (name, reponame) force: force initialization of the repo even if exists backend: backend that must be used for this (e.g. s3)
def mpsse_set_clock(self, clock_hz, adaptive=False, three_phase=False): self._write('\x8A') if adaptive: self._write('\x96') else: self._write('\x97') if three_phase: self._write('\x8C') else: self._write('\x8D') divisor = int(math.ceil((30000000.0-float(clock_hz))/float(clock_hz))) & 0xFFFF if three_phase: divisor = int(divisor*(2.0/3.0)) logger.debug('Setting clockspeed with divisor value {0}'.format(divisor)) self._write(str(bytearray((0x86, divisor & 0xFF, (divisor >> 8) & 0xFF))))
Set the clock speed of the MPSSE engine. Can be any value from 450 Hz to 30 MHz; the engine will use that speed or the closest supported speed below it.
def _get_heron_support_processes(self): retval = {} retval[self.heron_shell_ids[self.shard]] = Command([ '%s' % self.heron_shell_binary, '--port=%s' % self.shell_port, '--log_file_prefix=%s/heron-shell-%s.log' % (self.log_dir, self.shard), '--secret=%s' % self.topology_id], self.shell_env) return retval
Get a map from all daemon services' name to the command to start them
def SHLD(cpu, dest, src, count): OperandSize = dest.size tempCount = Operators.ZEXTEND(count.read(), OperandSize) & (OperandSize - 1) arg0 = dest.read() arg1 = src.read() MASK = ((1 << OperandSize) - 1) t0 = (arg0 << tempCount) t1 = arg1 >> (OperandSize - tempCount) res = Operators.ITEBV(OperandSize, tempCount == 0, arg0, t0 | t1) res = res & MASK dest.write(res) if isinstance(tempCount, int) and tempCount == 0: pass else: SIGN_MASK = 1 << (OperandSize - 1) lastbit = 0 != ((arg0 << (tempCount - 1)) & SIGN_MASK) cpu._set_shiftd_flags(OperandSize, arg0, res, lastbit, tempCount)
Double precision shift left.

Shifts the first operand (destination operand) to the left the number of bits
specified by the third operand (count operand). The second operand (source
operand) provides bits to shift in from the right (starting with the least
significant bit of the destination operand).

:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
:param count: count operand
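A concrete pure-Python model of the 16-bit case (an illustrative helper, not part of the emulator above):

def shld16(dest, src, count):
    count &= 15                      # the count is masked to the operand size
    if count == 0:
        return dest
    mask = (1 << 16) - 1
    return ((dest << count) | (src >> (16 - count))) & mask

print(hex(shld16(0x8001, 0xF000, 4)))   # 0x1f: four high bits of src shifted in from the right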
def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York', ax=None, **kwargs): if ax is None: ax = plt.gca() txn_time = transactions.copy() txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz)) txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute) txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs() txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961)) txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes txn_time = txn_time.groupby(level=0).sum() txn_time['time_str'] = txn_time.index.map(lambda x: str(datetime.time(int(x / 60), x % 60))[:-3]) trade_value_sum = txn_time.trade_value.sum() txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs) ax.set_xlim(570, 960) ax.set_xticks(txn_time.index[::int(30 / bin_minutes)]) ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)]) ax.set_title('Transaction time distribution') ax.set_ylabel('Proportion') ax.set_xlabel('') return ax
Plots a histogram of transaction times, binning the times into buckets of a given duration. Parameters ---------- transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in tears.create_full_tear_sheet. bin_minutes : float, optional Sizes of the bins in minutes, defaults to 5 minutes. tz : str, optional Time zone to plot against. Note that if the specified zone does not apply daylight savings, the distribution may be partially offset. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on.
def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False): flags = 0 if rsa_oaep_padding: flags = Advapi32Const.CRYPT_OAEP out_len = new(advapi32, 'DWORD *', len(data)) res = advapi32.CryptEncrypt( certificate_or_public_key.ex_key_handle, null(), True, flags, null(), out_len, 0 ) handle_error(res) buffer_len = deref(out_len) buffer = buffer_from_bytes(buffer_len) write_to_buffer(buffer, data) pointer_set(out_len, len(data)) res = advapi32.CryptEncrypt( certificate_or_public_key.ex_key_handle, null(), True, flags, buffer, out_len, buffer_len ) handle_error(res) return bytes_from_buffer(buffer, deref(out_len))[::-1]
Encrypts a value using an RSA public key via CryptoAPI :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
def SWAP(self, *operands):
    a = operands[0]
    b = operands[-1]
    return (b,) + operands[1:-1] + (a,)
Exchange 1st and 2nd stack items
def mkdir(self, folder): current_folder = self._ftp.pwd() folders = folder.split('/') for fld in folders: try: self.cd(fld) except error_perm: self._ftp.mkd(fld) self.cd(fld) self.cd(current_folder)
Creates a folder in the server :param folder: the folder to be created. :type folder: string
def _tile(self, n):
    pos = self._trans(self.pos[n])
    return Tile(pos, pos).pad(self.support_pad)
Get the update tile surrounding particle `n`
def get_configuration(self, key, default=None):
    if key in self.config:
        return self.config.get(key)
    else:
        return default
Returns the configuration for KEY
async def fire(self, *args, **kwargs):
    logger.debug('Fired {}'.format(self))
    for observer in self._observers:
        gen = observer(*args, **kwargs)
        if asyncio.iscoroutinefunction(observer):
            await gen
Fire this event, calling all observers with the same arguments.
def delete(self, force=True, pid=None): pid = pid or self.pid if self['_deposit'].get('pid'): raise PIDInvalidAction() if pid: pid.delete() return super(Deposit, self).delete(force=force)
Delete deposit. Status required: ``'draft'``. :param force: Force deposit delete. (Default: ``True``) :param pid: Force pid object. (Default: ``None``) :returns: A new Deposit object.
def luminosity_within_circle_in_units(self, radius: dim.Length, unit_luminosity='eps', kpc_per_arcsec=None, exposure_time=None): if not isinstance(radius, dim.Length): radius = dim.Length(value=radius, unit_length='arcsec') profile = self.new_profile_with_units_converted(unit_length=radius.unit_length, unit_luminosity=unit_luminosity, kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time) luminosity = quad(profile.luminosity_integral, a=0.0, b=radius, args=(1.0,))[0] return dim.Luminosity(luminosity, unit_luminosity)
Integrate the light profile to compute the total luminosity within a circle of specified radius. This is
centred on the light profile's centre.

The following units for luminosity can be specified and output:

- Electrons per second (default) - 'eps'.
- Counts - 'counts' (multiplies the luminosity in electrons per second by the exposure time).

Parameters
----------
radius : float
    The radius of the circle to compute the luminosity within.
unit_luminosity : str
    The units the luminosity is returned in (eps | counts).
exposure_time : float or None
    The exposure time of the observation, which converts luminosity from electrons per second units to counts.
def expand_filepaths(base_dir, rel_paths):
    return [os.path.join(base_dir, os.path.normpath(rp)) for rp in rel_paths]
Expand a list of relative paths to a given base directory.

Parameters
----------
base_dir : str
    The target base directory
rel_paths : list (or list-like)
    Collection of relative path strings

Returns
-------
expanded_paths : list
    `rel_paths` rooted at `base_dir`

Examples
--------
>>> jams.util.expand_filepaths('/data', ['audio', 'beat', 'seglab'])
['/data/audio', '/data/beat', '/data/seglab']
def get_deck_link(self, deck: BaseAttrDict): deck_link = 'https://link.clashroyale.com/deck/en?deck=' for i in deck: card = self.get_card_info(i.name) deck_link += '{0.id};'.format(card) return deck_link
Form a deck link.

Parameters
----------
deck: official_api.models.BaseAttrDict
    An object representing a deck. It can be retrieved from ``Player.current_deck``.

Returns
-------
str
def acquireConnection(self): self._logger.debug("Acquiring connection") self._conn._ping_check() connWrap = ConnectionWrapper(dbConn=self._conn, cursor=self._conn.cursor(), releaser=self._releaseConnection, logger=self._logger) return connWrap
Get a Connection instance.

Returns:
----------------------------------------------------------------
retval: A ConnectionWrapper instance.

NOTE: Caller is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager expression
(with ... as:) to release resources.
def create(self, store_id, order_id, data): self.store_id = store_id self.order_id = order_id if 'id' not in data: raise KeyError('The order line must have an id') if 'product_id' not in data: raise KeyError('The order line must have a product_id') if 'product_variant_id' not in data: raise KeyError('The order line must have a product_variant_id') if 'quantity' not in data: raise KeyError('The order line must have a quantity') if 'price' not in data: raise KeyError('The order line must have a price') response = self._mc_client._post(url=self._build_path(store_id, 'orders', order_id, 'lines')) if response is not None: self.line_id = response['id'] else: self.line_id = None return response
Add a new line item to an existing order. :param store_id: The store id. :type store_id: :py:class:`str` :param order_id: The id for the order in a store. :type order_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "id": string*, "product_id": string*, "product_variant_id": string*, "quantity": integer*, "price": number* }
def default_headers(self): _headers = { "User-Agent": "Pyzotero/%s" % __version__, "Zotero-API-Version": "%s" % __api_version__, } if self.api_key: _headers["Authorization"] = "Bearer %s" % self.api_key return _headers
It's always OK to include these headers
def get_data(filename):
    name, ext = get_file_extension(filename)
    func = json_get_data if ext == '.json' else yaml_get_data
    return func(filename)
Calls the right function according to the file extension.
def get_repository(self, path, info=None, verbose=True): if path.strip() in ('','.'): path = os.getcwd() realPath = os.path.realpath( os.path.expanduser(path) ) if not os.path.isdir(realPath): os.makedirs(realPath) if not self.is_repository(realPath): self.create_repository(realPath, info=info, verbose=verbose) else: self.load_repository(realPath)
Create a repository at the given real path or load any existing one.

This method ensures the creation of the directory in the system if it is missing.

Unlike create_repository, this method doesn't erase any existing repository in the path but loads it instead.

**N.B. On some systems and some paths, creating a directory may require root permissions.**

:Parameters:
    #. path (string): The real absolute path where to create the Repository.
       If '.' or an empty string is passed, the current working directory will be used.
    #. info (None, object): Any information that can identify the repository.
    #. verbose (boolean): Whether to be warned and informed about any abnormalities.
def updatecache(filename, module_globals=None): if filename in cache: del cache[filename] if not filename or (filename.startswith('<') and filename.endswith('>')): return [] fullname = filename try: stat = os.stat(fullname) except OSError: basename = filename if module_globals and '__loader__' in module_globals: name = module_globals.get('__name__') loader = module_globals['__loader__'] get_source = getattr(loader, 'get_source', None) if name and get_source: try: data = get_source(name) except (ImportError, IOError): pass else: if data is None: return [] cache[filename] = ( len(data), None, [line+'\n' for line in data.splitlines()], fullname ) return cache[filename][2] if os.path.isabs(filename): return [] for dirname in sys.path: try: fullname = os.path.join(dirname, basename) except (TypeError, AttributeError): continue try: stat = os.stat(fullname) break except os.error: pass else: return [] try: with open(fullname, 'rU') as fp: lines = fp.readlines() except IOError: return [] if lines and not lines[-1].endswith('\n'): lines[-1] += '\n' size, mtime = stat.st_size, stat.st_mtime cache[filename] = size, mtime, lines, fullname return lines
Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.
def pid_context(pid_filename=None): pid_filename = pid_filename or DEFAULT_PID_FILENAME if os.path.exists(pid_filename): contents = open(pid_filename).read(16) log.warning('pid_filename %s already exists with contents %s', pid_filename, contents) with open(pid_filename, 'w') as fp: fp.write(str(os.getpid())) fp.write('\n') try: yield finally: try: os.remove(pid_filename) except Exception as e: log.error('Got an exception %s deleting the pid_filename %s', e, pid_filename)
For the duration of this context manager, put the PID for this process into `pid_filename`, and then remove the file at the end.
def _pprint(params, offset=0, printer=repr): options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: this_repr = '%s=%s' % (k, str(v)) else: this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines
Pretty print the dictionary 'params'.

Parameters
----------
params: dict
    The dictionary to pretty print
offset: int
    The offset in characters to add at the beginning of each line.
printer:
    The function to convert entries to strings, typically the builtin str or repr
def count_snps(mat): snps = np.zeros(4, dtype=np.uint32) snps[0] = np.uint32(\ mat[0, 5] + mat[0, 10] + mat[0, 15] + \ mat[5, 0] + mat[5, 10] + mat[5, 15] + \ mat[10, 0] + mat[10, 5] + mat[10, 15] + \ mat[15, 0] + mat[15, 5] + mat[15, 10]) for i in range(16): if i % 5: snps[1] += mat[i, i] snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\ mat[4, 1] + mat[6, 9] + mat[7, 13] +\ mat[8, 2] + mat[9, 6] + mat[11, 14] +\ mat[12, 3] + mat[13, 7] + mat[14, 11] snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2] return snps
get dstats from the count array and return as a float tuple
def join(self, timeout=None): if not self.__initialized: raise RuntimeError("Thread.__init__() not called") if not self.__started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") if __debug__: if not self.__stopped: self._note("%s.join(): waiting until thread stops", self) self.__block.acquire() try: if timeout is None: while not self.__stopped: self.__block.wait() if __debug__: self._note("%s.join(): thread stopped", self) else: deadline = _time() + timeout while not self.__stopped: delay = deadline - _time() if delay <= 0: if __debug__: self._note("%s.join(): timed out", self) break self.__block.wait(delay) else: if __debug__: self._note("%s.join(): thread stopped", self) finally: self.__block.release()
Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception.
def formfield(self, **kwargs):
    defaults = {
        'form_class': RichTextFormField,
        'config': self.config,
    }
    defaults.update(kwargs)
    return super(RichTextField, self).formfield(**defaults)
Get the form for field.
def set_pkg_source_info(self, doc, text): self.assert_package_exists() if not self.package_source_info_set: self.package_source_info_set = True doc.package.source_info = text return True else: raise CardinalityError('Package::SourceInfo')
Sets the package's source information, if not already set. text - Free form text. Raises CardinalityError if already defined. Raises OrderError if no package previously defined.
def get(ctx): user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build')) try: response = PolyaxonClient().build_job.get_build(user, project_name, _build) cache.cache(config_manager=BuildJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get build job `{}`.'.format(_build)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_build_details(response)
Get build job. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon build -b 1 get ``` \b ```bash $ polyaxon build --build=1 --project=project_name get ```
def stem(self, word): word = normalize('NFC', text_type(word.lower())) word = word.translate(self._umlauts) wlen = len(word) - 1 if wlen > 3: if wlen > 5: if word[-3:] == 'nen': return word[:-3] if wlen > 4: if word[-2:] in {'en', 'se', 'es', 'er'}: return word[:-2] if word[-1] in {'e', 'n', 'r', 's'}: return word[:-1] return word
Return CLEF German stem. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = CLEFGerman() >>> stmr.stem('lesen') 'lese' >>> stmr.stem('graues') 'grau' >>> stmr.stem('buchstabieren') 'buchstabier'
def _get_comp_config(self): proto_config = topology_pb2.Config() key = proto_config.kvs.add() key.key = TOPOLOGY_COMPONENT_PARALLELISM key.value = str(self.parallelism) key.type = topology_pb2.ConfigValueType.Value("STRING_VALUE") if self.custom_config is not None: sanitized = self._sanitize_config(self.custom_config) for key, value in sanitized.items(): if isinstance(value, str): kvs = proto_config.kvs.add() kvs.key = key kvs.value = value kvs.type = topology_pb2.ConfigValueType.Value("STRING_VALUE") else: kvs = proto_config.kvs.add() kvs.key = key kvs.serialized_value = default_serializer.serialize(value) kvs.type = topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE") return proto_config
Returns component-specific Config protobuf message.

It first adds ``topology.component.parallelism``, which is then overridden by a
user-defined component-specific configuration, specified by spec().
def settings(self): stmt = "select {fields} from pg_settings".format(fields=', '.join(SETTINGS_FIELDS)) settings = [] for row in self._iter_results(stmt): row['setting'] = self._vartype_map[row['vartype']](row['setting']) settings.append(Settings(**row)) return settings
Returns settings from the server.
def __refill_tokenbuffer(self):
    if len(self.tokens) == 0:
        self.__tokenize(self.dxfile.readline())
Add a new tokenized line from the file to the token buffer. __refill_tokenbuffer() Only reads a new line if the buffer is empty. It is safe to call it repeatedly. At end of file, method returns empty strings and it is up to __peek and __consume to flag the end of the stream.
def get_credentials_from_file(filepath): try: creds = load_file_as_yaml(filepath) except Exception: creds = {} profile_name = os.environ.get(citr_env_vars.CITRINATION_PROFILE) if profile_name is None or len(profile_name) == 0: profile_name = DEFAULT_CITRINATION_PROFILE api_key = None site = None try: profile = creds[profile_name] api_key = profile[CREDENTIALS_API_KEY_KEY] site = profile[CREDENTIALS_SITE_KEY] except KeyError: pass return (api_key, site)
Extracts credentials from the yaml formatted credential filepath passed in. Uses the default profile if the CITRINATION_PROFILE env var is not set, otherwise looks for a profile with that name in the credentials file. :param filepath: The path of the credentials file
def _open(self, f):
    if None in self.files:
        fd = self.files.index(None)
        self.files[fd] = f
    else:
        fd = len(self.files)
        self.files.append(f)
    return fd
Adds a file descriptor to the current file descriptor list :rtype: int :param f: the file descriptor to add. :return: the index of the file descriptor in the file descr. list
def _scan_footpaths(self, stop_id, walk_departure_time):
    for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True):
        d_walk = data["d_walk"]
        arrival_time = walk_departure_time + d_walk / self._walk_speed
        self._update_stop_label(neighbor, arrival_time)
Scan the footpaths originating from stop_id Parameters ---------- stop_id: int
def set_seq2(self, b):
    if b is self.b:
        return
    self.b = b
    self.matching_blocks = self.opcodes = None
    self.fullbcount = None
    self.__chain_b()
Set the second sequence to be compared.

The first sequence to be compared is not changed.

>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>

SequenceMatcher computes and caches detailed information about the second
sequence, so if you want to compare one sequence S against many sequences,
use .set_seq2(S) once and call .set_seq1(x) repeatedly for each of the other
sequences.

See also set_seqs() and set_seq1().
def fwhm2sigma(fwhm):
    fwhm = np.asarray(fwhm)
    return fwhm / np.sqrt(8 * np.log(2))
Convert a FWHM value to sigma in a Gaussian kernel.

Parameters
----------
fwhm: float or numpy.array
    fwhm value or values

Returns
-------
sigma: float or numpy.array
    sigma values
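The conversion uses FWHM = 2*sqrt(2*ln 2)*sigma, roughly 2.3548*sigma, so for example:

import numpy as np

print(np.round(fwhm2sigma(2.3548200450309493), 6))   # 1.0
print(fwhm2sigma(np.array([2.0, 4.0])))              # approx. [0.8493, 1.6986]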
def is_home_environment(path):
    home = unipath(os.environ.get('CPENV_HOME', '~/.cpenv'))
    path = unipath(path)
    return path.startswith(home)
Returns True if path is in CPENV_HOME
def analyzeAll(self): searchableData=str(self.files2) self.log.debug("considering analysis for %d ABFs",len(self.IDs)) for ID in self.IDs: if not ID+"_" in searchableData: self.log.debug("%s needs analysis",ID) try: self.analyzeABF(ID) except: print("EXCEPTION! "*100) else: self.log.debug("%s has existing analysis, not overwriting",ID) self.log.debug("verified analysis of %d ABFs",len(self.IDs))
analyze every unanalyzed ABF in the folder.
def _encrypt(self, value):
    value = json.dumps(value)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        encrypted_value = self.cipher.encrypt(value.encode('utf8'))
    hexified_value = binascii.hexlify(encrypted_value).decode('ascii')
    return hexified_value
Turn a JSON-serializable value into a JSON-encoded, encrypted, hexadecimal string.
def tf_combined_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None): q_model_loss = self.fn_loss( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=update, reference=reference ) demo_loss = self.fn_demo_loss( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, update=update, reference=reference ) return q_model_loss + self.supervised_weight * demo_loss
Combines the Q-model loss with the demonstration (supervised) loss, returning q_model_loss + supervised_weight * demo_loss.
def find_effect_class(self, path) -> Type[Effect]: package_name, class_name = parse_package_string(path) if package_name: package = self.get_package(package_name) return package.find_effect_class(class_name, raise_for_error=True) for package in self.packages: effect_cls = package.find_effect_class(class_name) if effect_cls: return effect_cls raise EffectError("No effect class '{}' found in any packages".format(class_name))
Find an effect class by class name or full python path to class Args: path (str): effect class name or full python path to effect class Returns: Effect class Raises: EffectError if no class is found
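A usage sketch of the two lookup modes; the registry variable, package, and effect names are hypothetical:

# full python path: restricted to the named package
effect_cls = effects.find_effect_class('myproject.effects.Bloom')
# bare class name: searched across all registered packages
effect_cls = effects.find_effect_class('Bloom')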
def expGenerator(args): parser = OptionParser() parser.set_usage("%prog [options] --description='{json object with args}'\n" + \ "%prog [options] --descriptionFromFile='{filename}'\n" + \ "%prog [options] --showSchema") parser.add_option("--description", dest = "description", help = "Tells ExpGenerator to generate an experiment description.py and " \ "permutations.py file using the given JSON formatted experiment "\ "description string.") parser.add_option("--descriptionFromFile", dest = 'descriptionFromFile', help = "Tells ExpGenerator to open the given filename and use it's " \ "contents as the JSON formatted experiment description.") parser.add_option("--claDescriptionTemplateFile", dest = 'claDescriptionTemplateFile', default = 'claDescriptionTemplate.tpl', help = "The file containing the template description file for " \ " ExpGenerator [default: %default]") parser.add_option("--showSchema", action="store_true", dest="showSchema", help="Prints the JSON schemas for the --description arg.") parser.add_option("--version", dest = 'version', default='v2', help = "Generate the permutations file for this version of hypersearch." " Possible choices are 'v1' and 'v2' [default: %default].") parser.add_option("--outDir", dest = "outDir", default=None, help = "Where to generate experiment. If not specified, " \ "then a temp directory will be created" ) (options, remainingArgs) = parser.parse_args(args) if len(remainingArgs) > 0: raise _InvalidCommandArgException( _makeUsageErrorStr("Unexpected command-line args: <%s>" % \ (' '.join(remainingArgs),), parser.get_usage())) activeOptions = filter(lambda x: getattr(options, x) != None, ('description', 'showSchema')) if len(activeOptions) > 1: raise _InvalidCommandArgException( _makeUsageErrorStr(("The specified command options are " + \ "mutually-exclusive: %s") % (activeOptions,), parser.get_usage())) if options.showSchema: _handleShowSchemaOption() elif options.description: _handleDescriptionOption(options.description, options.outDir, parser.get_usage(), hsVersion=options.version, claDescriptionTemplateFile = options.claDescriptionTemplateFile) elif options.descriptionFromFile: _handleDescriptionFromFileOption(options.descriptionFromFile, options.outDir, parser.get_usage(), hsVersion=options.version, claDescriptionTemplateFile = options.claDescriptionTemplateFile) else: raise _InvalidCommandArgException( _makeUsageErrorStr("Error in validating command options. No option " "provided:\n", parser.get_usage()))
Parses, validates, and executes command-line options; On success: Performs requested operation and exits program normally On Error: Dumps exception/error info in JSON format to stdout and exits the program with non-zero status.
def begin(self): return Range(self.source_buffer, self.begin_pos, self.begin_pos, expanded_from=self.expanded_from)
Returns a zero-length range located just before the beginning of this range.
def load(fp, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list): converter = None output = cls() arraykeys = set() for line in fp: if converter is None: if isinstance(line, six.text_type): converter = six.u else: converter = six.b default_separator = converter('|') default_index_separator = converter('_') newline = converter('\n') if separator is DEFAULT: separator = default_separator if index_separator is DEFAULT: index_separator = default_index_separator key, value = line.strip().split(separator, 1) keyparts = key.split(index_separator) try: index = int(keyparts[-1]) endwithint = True except ValueError: endwithint = False if len(keyparts) > 1 and endwithint: basekey = key.rsplit(index_separator, 1)[0] if basekey not in arraykeys: arraykeys.add(basekey) if basekey in output: if not isinstance(output[basekey], dict): output[basekey] = {-1: output[basekey]} else: output[basekey] = {} output[basekey][index] = value else: if key in output and isinstance(output[key], dict): output[key][-1] = value else: output[key] = value for key in arraykeys: output[key] = list_cls(pair[1] for pair in sorted(six.iteritems(output[key]))) return output
Load an object from the file pointer. :param fp: A readable filehandle. :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types. :param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types. :param cls: A callable that returns a Mapping that is filled with pairs. The most common alternate option would be OrderedDict. :param list_cls: A callable that takes an iterable and returns a sequence.
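A usage sketch showing the expected line format, including indexed keys that get folded into a list:

import io

fp = io.StringIO(u'name|widget\ncolor_0|red\ncolor_1|blue\n')
load(fp)
# {u'name': u'widget', u'color': [u'red', u'blue']}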
def vector_to_volume(arr, mask, order='C'): if mask.dtype != bool: raise ValueError("mask must be a boolean array") # flatten (1, n) or (n, 1) arrays before the dimensionality check, # otherwise this branch is unreachable if arr.ndim == 2 and any(v == 1 for v in arr.shape): log.debug('Got an array of shape {}, flattening for my purposes.'.format(arr.shape)) arr = arr.flatten() if arr.ndim != 1: raise ValueError("vector must be a 1-dimensional array") volume = np.zeros(mask.shape[:3], dtype=arr.dtype, order=order) volume[mask] = arr return volume
Transform a given vector to a volume. This is a reshape function for 3D flattened and maybe masked vectors. Parameters ---------- arr: np.array 1-Dimensional array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. order: str Memory layout of the output volume, 'C' (row-major) or 'F' (column-major). Returns ------- volume: np.ndarray 3D array with the values of arr at the True positions of mask and zeros elsewhere.
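A minimal round-trip sketch with a tiny boolean mask:

import numpy as np

mask = np.zeros((2, 2, 2), dtype=bool)
mask[0, 0, 0] = mask[1, 1, 1] = True
vol = vector_to_volume(np.array([3.0, 7.0]), mask)
# vol[0, 0, 0] == 3.0, vol[1, 1, 1] == 7.0, every other voxel is 0.0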
def Carcinogen(CASRN, AvailableMethods=False, Method=None): methods = [COMBINED, IARC, NTP] if AvailableMethods: return methods if not Method: Method = methods[0] if Method == IARC: if CASRN in IARC_data.index: status = IARC_codes[IARC_data.at[CASRN, 'group']] else: status = UNLISTED elif Method == NTP: if CASRN in NTP_data.index: status = NTP_codes[NTP_data.at[CASRN, 'Listing']] else: status = UNLISTED elif Method == COMBINED: status = {} for method in methods[1:]: status[method] = Carcinogen(CASRN, Method=method) else: raise Exception('Failure in function: unrecognized Method') return status
r'''Looks up if a chemical is listed as a carcinogen or not according to either a specifc method or with all methods. Returns either the status as a string for a specified method, or the status of the chemical in all available data sources, in the format {source: status}. Parameters ---------- CASRN : string CASRN [-] Returns ------- status : str or dict Carcinogen status information [-] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain carcinogen status with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in Carcinogen_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain if a chemical is listed as carcinogenic, and will return methods instead of the status Notes ----- Supported methods are: * **IARC**: International Agency for Research on Cancer, [1]_. As extracted with a last update of February 22, 2016. Has listing information of 843 chemicals with CAS numbers. Chemicals without CAS numbers not included here. If two listings for the same CAS were available, that closest to the CAS number was used. If two listings were available published at different times, the latest value was used. All else equal, the most pessimistic value was used. * **NTP**: National Toxicology Program, [2]_. Has data on 226 chemicals. Examples -------- >>> Carcinogen('61-82-5') {'National Toxicology Program 13th Report on Carcinogens': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'} References ---------- .. [1] International Agency for Research on Cancer. Agents Classified by the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available from: http://monographs.iarc.fr/ENG/Classification/ .. [2] NTP (National Toxicology Program). 2014. Report on Carcinogens, Thirteenth Edition. Research Triangle Park, NC: U.S. Department of Health and Human Services, Public Health Service. http://ntp.niehs.nih.gov/pubhealth/roc/roc13/
def calc_key_stats(self, metric_store): stats_to_calculate = ['mean', 'std', 'min', 'max'] percentiles_to_calculate = range(0, 100, 1) for column, groups_store in metric_store.items(): for group, time_store in groups_store.items(): data = metric_store[column][group].values() if self.groupby: column_name = group + '.' + column else: column_name = column if column.startswith('qps'): self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(data, stats_to_calculate, percentiles_to_calculate) else: self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(list(heapq.merge(*data)), stats_to_calculate, percentiles_to_calculate) self.update_summary_stats(column_name)
Calculate stats such as percentile and mean :param dict metric_store: The metric store used to store all the parsed log data :return: None
def format_parameters(self, **kwargs): req_data = {} for k, v in kwargs.items(): if isinstance(v, (list, tuple)): k = k + '[]' req_data[k] = v return req_data
Formats request parameters for the API: keys whose values are lists or tuples get a '[]' suffix so they are sent as array parameters; scalar values pass through unchanged.
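For example (client is a hypothetical instance of the class this method belongs to):

client.format_parameters(status='open', tags=['red', 'blue'])
# {'status': 'open', 'tags[]': ['red', 'blue']}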
def _serialize_int(value, size=32, padding=0): if size <= 0 or size > 32: raise ValueError if not isinstance(value, (int, BitVec)): raise ValueError if issymbolic(value): buf = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1())) value = Operators.SEXTEND(value, value.size, size * 8) buf = ArrayProxy(buf.write_BE(padding, value, size)) else: value = int(value) buf = bytearray() for _ in range(padding): buf.append(0) for position in reversed(range(size)): buf.append(Operators.EXTRACT(value, position * 8, 8)) return buf
Translates a signed python integral or a BitVec into a size-byte big-endian byte sequence (32 bytes by default), MSB first, optionally preceded by padding zero bytes
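A concrete sketch of the non-symbolic branch (padding zero bytes first, then the value, MSB first):

buf = _serialize_int(1, size=4, padding=2)
# bytearray(b'\x00\x00\x00\x00\x00\x01')  -- 2 padding bytes followed by 4 value bytes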
def query(self, input='', params={}): payload = {'input': input, 'appid': self.appid} for key, value in params.items(): if isinstance(value, (list, tuple)): payload[key] = ','.join(value) else: payload[key] = value try: r = requests.get("http://api.wolframalpha.com/v2/query", params=payload) if r.status_code != 200: raise Exception('Invalid response status code: %s' % (r.status_code)) if r.encoding != 'utf-8': raise Exception('Invalid encoding: %s' % (r.encoding)) except Exception as e: return Result(error=e) return Result(xml=r.text)
Query Wolfram Alpha and return a Result object
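A usage sketch; the Client wrapper name and appid below are hypothetical:

client = Client(appid='DEMO-APPID')
result = client.query('integrate x^2', params={'format': ['plaintext', 'image']})
# list/tuple values are joined with commas: format=plaintext,image
if result.error:
    print('query failed:', result.error)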
def oauth_enabled(decorated_function=None, scopes=None, **decorator_kwargs): def curry_wrapper(wrapped_function): @wraps(wrapped_function) def enabled_wrapper(request, *args, **kwargs): return_url = decorator_kwargs.pop('return_url', request.get_full_path()) user_oauth = django_util.UserOAuth2(request, scopes, return_url) setattr(request, django_util.oauth2_settings.request_prefix, user_oauth) return wrapped_function(request, *args, **kwargs) return enabled_wrapper if decorated_function: return curry_wrapper(decorated_function) else: return curry_wrapper
Decorator to enable OAuth Credentials if authorized, and setup the oauth object on the request object to provide helper functions to start the flow otherwise. .. code-block:: python :caption: views.py :name: views_enabled3 from oauth2client.django_util.decorators import oauth_enabled @oauth_enabled def optional_oauth2(request): if request.oauth.has_credentials(): # this could be passed into a view # request.oauth.http is also initialized return HttpResponse("User email: {0}".format( request.oauth.credentials.id_token['email'])) else: return HttpResponse('Here is an OAuth Authorize link: <a href="{0}">Authorize</a>'.format( request.oauth.get_authorize_redirect())) Args: decorated_function: View function to decorate. scopes: Scopes to require, will default. decorator_kwargs: Can include ``return_url`` to specify the URL to return to after OAuth2 authorization is complete. Returns: The decorated view function.
def actions(self, state): "In the leftmost empty column, try all non-conflicting rows." if state[-1] is not None: return [] else: col = state.index(None) return [row for row in range(self.N) if not self.conflicted(state, row, col)]
In the leftmost empty column, try all non-conflicting rows.
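A small N-queens illustration, assuming the AIMA-style representation where state[col] holds the row of the queen placed in that column and None marks an empty column (the constructor call is hypothetical):

problem = NQueensProblem(4)          # N = 4
state = [1, None, None, None]        # a queen at row 1 of column 0
problem.actions(state)
# -> [3]; rows 0 and 2 are diagonal conflicts, row 1 is a row conflict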