| code | docstring |
|---|---|
def get_course_runs_from_program(program):
course_runs = set()
for course in program.get("courses", []):
for run in course.get("course_runs", []):
if "key" in run and run["key"]:
course_runs.add(run["key"])
return course_runs
|
Return course runs from program data.
Arguments:
program(dict): Program data from Course Catalog API
Returns:
set: course runs in given program
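A minimal usage sketch (assuming the function above is in scope; the program dict is a hypothetical Course Catalog API payload):
>>> program = {"courses": [
...     {"course_runs": [{"key": "course-v1:OrgX+C101+2024"}, {"key": ""}]},
...     {"course_runs": [{"key": "course-v1:OrgX+C102+2024"}]}]}
>>> sorted(get_course_runs_from_program(program))
['course-v1:OrgX+C101+2024', 'course-v1:OrgX+C102+2024']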
|
def get_compound_amounts(self):
result = self._compound_masses * 1.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
result[index] = stoich.amount(compound, result[index])
return result
|
Determine the mole amounts of all the compounds.
:returns: List of amounts. [kmol]
|
def plot_diff(self, graphing_library='matplotlib'):
diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
graphed = False
for submetric in diff_datasource:
baseline_csv = naarad.utils.get_default_csv(self.reports[0].local_location, (submetric + '.percentiles'))
current_csv = naarad.utils.get_default_csv(self.reports[1].local_location, (submetric + '.percentiles'))
if (not (naarad.utils.is_valid_file(baseline_csv) & naarad.utils.is_valid_file(current_csv))):
continue
baseline_plot = PD(input_csv=baseline_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='baseline', x_label='Percentiles')
current_plot = PD(input_csv=current_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='current', x_label='Percentiles')
graphed, div_file = Diff.graphing_modules[graphing_library].graph_data_on_the_same_graph([baseline_plot, current_plot],
os.path.join(self.output_directory, self.resource_path),
self.resource_path, (submetric + '.diff'))
if graphed:
self.plot_files.append(div_file)
return True
|
Generate CDF diff plots of the submetrics
|
def set_param(self, section, param, value):
if section not in self.conf or param not in self.conf[section]:
logger.error('Config section %s or param %s does not exist', section, param)
else:
self.conf[section][param] = value
|
Change a param in the config
|
def commandline(self, **mpiargs):
cmd = self.MDRUN.commandline()
if self.mpiexec:
cmd = self.mpicommand(**mpiargs) + cmd
return cmd
|
Returns simple command line to invoke mdrun.
If :attr:`mpiexec` is set then :meth:`mpicommand` provides the mpi
launcher command that prefixes the actual ``mdrun`` invocation:
:attr:`mpiexec` [*mpiargs*] :attr:`mdrun` [*mdrun-args*]
The *mdrun-args* are set on initializing the class. Override
:meth:`mpicommand` to fit your system if the simple default
OpenMP launcher is not appropriate.
|
def get_nearest_year_for_day(day):
now = time.gmtime()
result = now.tm_year
if day - now.tm_yday > 365 // 2:
result -= 1
if now.tm_yday - day > 365 // 2:
result += 1
return result
|
Returns the nearest year to now inferred from a Julian date.
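A minimal sketch of the intent (assuming the function above is in scope); the result depends on today's date, so only a range check is shown:
>>> import time
>>> year = get_nearest_year_for_day(360)   # day 360 is late December
>>> year in (time.gmtime().tm_year - 1, time.gmtime().tm_year)
True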
|
def getTextFromNode(node):
t = ""
for n in node.childNodes:
if n.nodeType == n.TEXT_NODE:
t += n.nodeValue
else:
raise NotTextNodeError
return t
|
Scans through all children of node and gathers the
text. If node has non-text child-nodes then
NotTextNodeError is raised.
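A minimal sketch using the standard library's minidom (assuming the function above is in scope and NotTextNodeError is defined alongside it):
>>> from xml.dom.minidom import parseString
>>> doc = parseString("<name>Ada Lovelace</name>")
>>> print(getTextFromNode(doc.documentElement))
Ada Lovelace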
|
def alignment(self, d=5):
vx = vy = vz = 0
for b in self.boids:
if b != self:
vx, vy, vz = vx+b.vx, vy+b.vy, vz+b.vz
n = len(self.boids)-1
vx, vy, vz = vx/n, vy/n, vz/n
return (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d
|
Boids match velocity with other boids.
|
def filter_mechanism_list(mechanisms, properties, allow_insecure = False,
server_side = False):
result = []
for mechanism in mechanisms:
try:
if server_side:
klass = SERVER_MECHANISMS_D[mechanism]
else:
klass = CLIENT_MECHANISMS_D[mechanism]
except KeyError:
logger.debug(" skipping {0} - not supported".format(mechanism))
continue
secure = properties.get("security-layer")
if not allow_insecure and not klass._pyxmpp_sasl_secure and not secure:
logger.debug(" skipping {0}, as it is not secure".format(mechanism))
continue
if not klass.are_properties_sufficient(properties):
logger.debug(" skipping {0}, as the properties are not sufficient"
.format(mechanism))
continue
result.append(mechanism)
return result
|
Filter a mechanisms list only to include those mechanisms that can
succeed with the provided properties and are secure enough.
:Parameters:
- `mechanisms`: list of the mechanisms names
- `properties`: available authentication properties
- `allow_insecure`: allow insecure mechanisms
:Types:
- `mechanisms`: sequence of `unicode`
- `properties`: mapping
- `allow_insecure`: `bool`
:returntype: `list` of `unicode`
|
def get(self, batch_webhook_id, **queryparams):
self.batch_webhook_id = batch_webhook_id
return self._mc_client._get(url=self._build_path(batch_webhook_id), **queryparams)
|
Get information about a specific batch webhook.
:param batch_webhook_id: The unique id for the batch webhook.
:type batch_webhook_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
|
def serialize_formula(formula):
charge = charge_from_formula(formula)
element_dict = nested_formula_parser(formula)
base = atoms_to_Hill(element_dict)
if charge == 0:
pass
elif charge > 0:
if charge == 1:
base += '+'
else:
base += '+' + str(charge)
elif charge < 0:
if charge == -1:
base += '-'
else:
base += str(charge)
return base
|
r'''Basic formula serializer to construct a consistently-formatted formula.
This is necessary for handling user-supplied formulas, which are not always
well formatted.
Performs no sanity checking that elements are actually elements.
Parameters
----------
formula : str
Formula string as parseable by the method nested_formula_parser, [-]
Returns
-------
formula : str
A consistently formatted formula to describe a molecular formula, [-]
Notes
-----
Examples
--------
>>> serialize_formula('Pd(NH3)4+3')
'H12N4Pd+3'
|
def TBH(cpu, dest):
base_addr = dest.get_mem_base_addr()
if dest.mem.base in ('PC', 'R15'):
base_addr = cpu.PC
offset = cpu.read_int(base_addr + dest.get_mem_offset(), 16)
offset = Operators.ZEXTEND(offset, cpu.address_bit_size)
cpu.PC += (offset << 1)
|
Table Branch Halfword causes a PC-relative forward branch using a table of single halfword offsets. A base
register provides a pointer to the table, and a second register supplies an index into the table. The branch
length is twice the value of the halfword returned from the table.
:param ARMv7Operand dest: see below; register
|
def embed_data(request):
result = _EmbedDataFixture(request)
result.delete_data_dir()
result.create_data_dir()
yield result
result.delete_data_dir()
|
Create a temporary directory with input data for the test.
The directory contents are copied from a directory with the same name as the test module,
located in the same directory as the test module.
|
def login(self, username, password=None, token=None):
self.session.basic_auth(username, password)
|
Login user for protected API calls.
|
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
|
Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document.
|
def get_popular_clans(self, **params: keys):
url = self.api.POPULAR + '/clans'
return self._get_model(url, PartialClan, **params)
|
Get a list of most queried clans
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
|
def get_child_by_name(parent, name):
def iterate_children(widget, name):
if widget.get_name() == name:
return widget
try:
for w in widget.get_children():
result = iterate_children(w, name)
if result is not None:
return result
else:
continue
except AttributeError:
pass
return iterate_children(parent, name)
|
Iterate through a gtk container, `parent`,
and return the widget with the name `name`.
|
def get_dh_params_length(server_handshake_bytes):
output = None
dh_params_bytes = None
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
continue
for message_type, message_data in parse_handshake_messages(record_data):
if message_type == b'\x0c':
dh_params_bytes = message_data
break
if dh_params_bytes:
break
if dh_params_bytes:
output = int_from_bytes(dh_params_bytes[0:2]) * 8
return output
|
Determines the length of the DH params from the ServerKeyExchange
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
None or an integer of the bit size of the DH parameters
|
def canonical_averages_dtype(spanning_cluster=True):
fields = list()
fields.extend([
('number_of_runs', 'uint32'),
])
if spanning_cluster:
fields.extend([
('percolation_probability_mean', 'float64'),
('percolation_probability_m2', 'float64'),
])
fields.extend([
('max_cluster_size_mean', 'float64'),
('max_cluster_size_m2', 'float64'),
('moments_mean', '(5,)float64'),
('moments_m2', '(5,)float64'),
])
return _ndarray_dtype(fields)
|
The NumPy Structured Array type for canonical averages over several
runs
Helper function
Parameters
----------
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
Returns
-------
ret : list of pairs of str
A list of tuples of field names and data types to be used as ``dtype``
argument in numpy ndarray constructors
See Also
--------
http://docs.scipy.org/doc/numpy/user/basics.rec.html
canonical_statistics_dtype
finalized_canonical_averages_dtype
|
def translate(self, dr):
tile = self.copy()
tile.l += dr
tile.r += dr
return tile
|
Translate a tile by an amount dr
>>> Tile(5).translate(1)
Tile [1, 1, 1] -> [6, 6, 6] ([5, 5, 5])
|
def get_project_config(project_path, use_cache=True):
return get_local_config(project_path, use_cache=use_cache) \
or get_user_config(project_path, use_cache=use_cache) \
or get_default_config()
|
Produces the Tidypy configuration to use for the specified project.
If a ``pyproject.toml`` exists, the configuration will be based on that. If
not, the TidyPy configuration in the user's home directory will be used. If
one does not exist, the default configuration will be used.
:param project_path: the path to the project that is going to be analyzed
:type project_path: str
:param use_cache:
whether or not to use cached versions of any remote/referenced TidyPy
configurations. If not specified, defaults to ``True``.
:type use_cache: bool
:rtype: dict
|
def window_gaussian(N, alpha=2.5):
t = linspace(-(N-1)/2., (N-1)/2., N)
w = exp(-0.5*(alpha * t/(N/2.))**2.)
return w
|
r"""Gaussian window
:param N: window length
.. math:: \exp^{-0.5 \left( \sigma\frac{n}{N/2} \right)^2}
with :math:`-\frac{N-1}{2}\leq n \leq \frac{N-1}{2}`.
.. note:: N-1 is used to be in agreement with octave convention. The ENBW of
1.4 is also in agreement with [Harris]_
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'gaussian', alpha=2.5)
.. seealso:: scipy.signal.gaussian, :func:`create_window`
|
def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats()
|
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the
d-dimensional space to optimize along. `directions` is
transformed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
|
def next_question(self):
for key, questions in self.questions.items():
if key in self.answers:
continue
for question in questions:
if self.check_condition(question._condition):
return question
return None
|
Returns the next `Question` in the questionnaire, or `None` if there
are no questions left. Returns first question for whose key there is no
answer and for which condition is satisfied, or for which there is no
condition.
|
def register_metrics(self, context):
sys_config = system_config.get_sys_config()
interval = float(sys_config[constants.HERON_METRICS_EXPORT_INTERVAL_SEC])
collector = context.get_metrics_collector()
super(ComponentMetrics, self).register_metrics(collector, interval)
|
Registers metrics to context
:param context: Topology Context
|
def finish_state(st, desc='finish-state', invert='guess'):
for minmass in [None, 0]:
for _ in range(3):
npart, poses = addsub.add_subtract_locally(st, region_depth=7,
minmass=minmass, invert=invert)
if npart == 0:
break
opt.finish(st, n_loop=1, separate_psf=True, desc=desc, dowarn=False)
opt.burn(st, mode='polish', desc=desc, n_loop=2, dowarn=False)
d = opt.finish(st, desc=desc, n_loop=4, dowarn=False)
if not d['converged']:
RLOG.warn('Optimization did not converge; consider re-running')
|
Final optimization for the best-possible state.
Runs a local add-subtract to capture any difficult-to-feature particles,
then does another set of optimization designed to get to the best
possible fit.
Parameters
----------
st : :class:`peri.states.ImageState`
The state to finish
desc : String, optional
Description to intermittently save the state as, as passed to
state.save. Default is `'finish-state'`.
invert : {'guess', True, False}
Whether to invert the image for featuring, as passed to
addsubtract.add_subtract. Default is to guess from the
state's current particles.
See Also
--------
`peri.opt.addsubtract.add_subtract_locally`
`peri.opt.optimize.finish`
|
def _win32_symlink2(path, link, allow_fallback=True, verbose=0):
if _win32_can_symlink():
return _win32_symlink(path, link, verbose)
else:
return _win32_junction(path, link, verbose)
|
Perform a real symbolic link if possible. However, on most versions of
Windows you need special privileges to create a real symlink. Therefore, we
try to create a symlink, but if that fails we fall back to using a junction.
AFAIK, the main difference between symlinks and junctions is that symlinks
can reference relative or absolute paths, whereas junctions always
reference absolute paths. Not 100% on this though. Windows is weird.
Note that junctions will not register as links via `islink`, but I
believe real symlinks will.
|
def datetime_exists(dt, tz=None):
if tz is None:
if dt.tzinfo is None:
raise ValueError('Datetime is naive and no time zone provided.')
tz = dt.tzinfo
dt = dt.replace(tzinfo=None)
dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
dt_rt = dt_rt.replace(tzinfo=None)
return dt == dt_rt
|
Given a datetime and a time zone, determine whether or not a given datetime
would fall in a gap.
:param dt:
A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
is provided.)
:param tz:
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
``None`` or not provided, the datetime's own time zone will be used.
:return:
Returns a boolean value whether or not the "wall time" exists in ``tz``.
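An illustrative sketch using the 2018 US spring-forward gap (assumes ``dateutil`` is available for the time zone object and the function above is in scope):
>>> from datetime import datetime
>>> from dateutil import tz
>>> nyc = tz.gettz('America/New_York')
>>> datetime_exists(datetime(2018, 3, 11, 2, 30), tz=nyc)   # inside the DST gap
False
>>> datetime_exists(datetime(2018, 3, 11, 3, 30), tz=nyc)
True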
|
def default(self, obj):
if isinstance(obj, datetime.datetime):
return self._encode_datetime(obj)
return json.JSONEncoder.default(self, obj)
|
Encode values as JSON strings.
This method overrides the default implementation from
`json.JSONEncoder`.
|
def load_state(self, key, delete=True):
with self.load_stream(key, binary=True) as f:
state = self._serializer.deserialize(f)
if delete:
self.rm(key)
return state
|
Load a state from storage.
:param key: key that identifies state
:rtype: manticore.core.StateBase
|
def tz_convert(dt, to_tz, from_tz=None) -> str:
logger = logs.get_logger(tz_convert, level='info')
f_tz, t_tz = get_tz(from_tz), get_tz(to_tz)
from_dt = pd.Timestamp(str(dt), tz=f_tz)
logger.debug(f'converting {str(from_dt)} from {f_tz} to {t_tz} ...')
return str(pd.Timestamp(str(from_dt), tz=t_tz))
|
Convert to tz
Args:
dt: date time
to_tz: to tz
from_tz: from tz - will be ignored if tz from dt is given
Returns:
str: date & time
Examples:
>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')
>>> tz_convert(dt_1, to_tz='NY')
'2018-09-10 04:00:00-04:00'
>>> dt_2 = pd.Timestamp('2018-01-10 16:00')
>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')
'2018-01-11 05:00:00+08:00'
>>> dt_3 = '2018-09-10 15:00'
>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')
'2018-09-10 02:00:00-04:00'
|
def get_last_modified_timestamp(path, ignore=None):
ignore = ignore or []
if not isinstance(path, six.string_types):
return
ignore_str = ''
if ignore:
assert isinstance(ignore, (tuple, list))
ignore_str = ' '.join("! -name '%s'" % _ for _ in ignore)
cmd = 'find "'+path+'" ' + ignore_str + ' -type f -printf "%T@ %p\n" | sort -n | tail -1 | cut -f 1 -d " "'
ret = subprocess.check_output(cmd, shell=True)
try:
ret = round(float(ret), 2)
except ValueError:
return
return ret
|
Recursively finds the most recent timestamp in the given directory.
|
def get_stores(self, names=None, workspaces=None):
if isinstance(workspaces, Workspace):
workspaces = [workspaces]
elif isinstance(workspaces, list) and [w for w in workspaces if isinstance(w, Workspace)]:
pass
else:
workspaces = self.get_workspaces(names=workspaces)
stores = []
for ws in workspaces:
ds_list = self.get_xml(ws.datastore_url)
cs_list = self.get_xml(ws.coveragestore_url)
wms_list = self.get_xml(ws.wmsstore_url)
stores.extend([datastore_from_index(self, ws, n) for n in ds_list.findall("dataStore")])
stores.extend([coveragestore_from_index(self, ws, n) for n in cs_list.findall("coverageStore")])
stores.extend([wmsstore_from_index(self, ws, n) for n in wms_list.findall("wmsStore")])
if names is None:
names = []
elif isinstance(names, basestring):
names = [s.strip() for s in names.split(',') if s.strip()]
if stores and names:
return ([store for store in stores if store.name in names])
return stores
|
Returns a list of stores in the catalog. If workspaces is specified, only stores in those workspaces
are returned. If names is specified, only stores that match are returned.
names can be either a comma-delimited string or an array.
Will return an empty list if no stores are found.
|
def setup_actions(self):
self.actionOpen.triggered.connect(self.on_open)
self.actionNew.triggered.connect(self.on_new)
self.actionSave.triggered.connect(self.on_save)
self.actionSave_as.triggered.connect(self.on_save_as)
self.actionQuit.triggered.connect(
QtWidgets.QApplication.instance().quit)
self.tabWidget.current_changed.connect(self.on_current_tab_changed)
self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)
self.actionAbout.triggered.connect(self.on_about)
self.actionRun.triggered.connect(self.on_run)
self.interactiveConsole.process_finished.connect(
self.on_process_finished)
self.actionConfigure_run.triggered.connect(self.on_configure_run)
|
Connects slots to signals
|
def schedule(self, variables=None, secure_variables=None, materials=None,
return_new_instance=False, backoff_time=1.0):
scheduling_args = dict(
variables=variables,
secure_variables=secure_variables,
material_fingerprint=materials,
headers={"Confirm": True},
)
scheduling_args = dict((k, v) for k, v in scheduling_args.items() if v is not None)
if return_new_instance:
pipelines = self.history()['pipelines']
if len(pipelines) == 0:
last_run = None
else:
last_run = pipelines[0]['counter']
response = self._post('/schedule', ok_status=202, **scheduling_args)
if not response:
return response
max_tries = 10
while max_tries > 0:
current = self.instance()
if not last_run and current:
return current
elif last_run and current['counter'] > last_run:
return current
else:
time.sleep(backoff_time)
max_tries -= 1
return response
else:
return self._post('/schedule', ok_status=202, **scheduling_args)
|
Schedule a pipeline run
Aliased as :meth:`run`, :meth:`schedule`, and :meth:`trigger`.
Args:
variables (dict, optional): Variables to set/override
secure_variables (dict, optional): Secure variables to set/override
materials (dict, optional): Material revisions to be used for
this pipeline run. The exact format for this is a bit iffy,
have a look at the official
`Go pipeline scheduling documentation`__ or inspect a call
from triggering manually in the UI.
return_new_instance (bool): Returns a :meth:`history` compatible
response for the newly scheduled instance. This is primarily so
users easily can get the new instance number. **Note:** This is done
in a very naive way, it just checks that the instance number is
higher than before the pipeline was triggered.
backoff_time (float): How long between each check for
:arg:`return_new_instance`.
.. __: http://api.go.cd/current/#scheduling-pipelines
Returns:
Response: :class:`gocd.api.response.Response` object
|
def check(self):
if not self.is_valid:
raise PolyaxonDeploymentConfigError(
'Deployment type `{}` not supported'.format(self.deployment_type))
check = False
if self.is_kubernetes:
check = self.check_for_kubernetes()
elif self.is_docker_compose:
check = self.check_for_docker_compose()
elif self.is_docker:
check = self.check_for_docker()
elif self.is_heroku:
check = self.check_for_heroku()
if not check:
raise PolyaxonDeploymentConfigError(
'Deployment `{}` is not valid'.format(self.deployment_type))
|
Run the platform-specific checks for the configured deployment type.
|
def window_kaiser(N, beta=8.6, method='numpy'):
if N == 1:
return ones(1)
if method == 'numpy':
from numpy import kaiser
return kaiser(N, beta)
else:
return _kaiser(N, beta)
|
r"""Kaiser window
:param N: window length
:param beta: kaiser parameter (default is 8.6)
To obtain a Kaiser window that designs an FIR filter with
sidelobe attenuation of :math:`\alpha` dB, use the following :math:`\beta` where
:math:`\beta = \pi \alpha`.
.. math::
w_n = \frac{I_0\left(\pi\alpha\sqrt{1-\left(\frac{2n}{M}-1\right)^2}\right)} {I_0(\pi \alpha)}
where
* :math:`I_0` is the zeroth order Modified Bessel function of the first kind.
* :math:`\alpha` is a real number that determines the shape of the
window. It determines the trade-off between main-lobe width and side
lobe level.
* the length of the sequence is N=M+1.
The Kaiser window can approximate many other windows by varying
the :math:`\beta` parameter:
===== ========================
beta Window shape
===== ========================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
===== ========================
.. plot::
:width: 80%
:include-source:
from pylab import plot, legend, xlim
from spectrum import window_kaiser
N = 64
for beta in [1,2,4,8,16]:
plot(window_kaiser(N, beta), label='beta='+str(beta))
xlim(0,N)
legend()
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'kaiser', beta=8.)
.. seealso:: numpy.kaiser, :func:`spectrum.window.create_window`
|
def remove_near_duplicate_relation(triples, threshold=0.97):
logging.debug("remove duplicate")
_assert_threshold(threshold)
duplicate_rel_counter = defaultdict(list)
relations = set()
for t in triples:
duplicate_rel_counter[t.relation].append(f"{t.head} {t.tail}")
relations.add(t.relation)
relations = list(relations)
num_triples = len(triples)
removal_relation_set = set()
for rel, values in duplicate_rel_counter.items():
duplicate_rel_counter[rel] = Superminhash(values)
for i in relations:
for j in relations:
if i == j or i in removal_relation_set or j in removal_relation_set: continue
close_relations = [i]
if _set_close_to(duplicate_rel_counter[i], duplicate_rel_counter[j], threshold):
close_relations.append(j)
if len(close_relations) > 1:
close_relations.pop(np.random.randint(len(close_relations)))
removal_relation_set |= set(close_relations)
logging.info("Removing {} relations: {}".format(len(removal_relation_set), str(removal_relation_set)))
return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))
|
If the entity pairs of one relation are as close as those of another relation, keep only one relation from such a set.
|
def rotate(self, angle, center=None):
args = [angle]
if center is not None:
args.extend(center)
self.poly.rotate(*args)
return self
|
Rotate the shape, in-place.
Parameters
----------
angle : float
Angle to rotate, in radians counter-clockwise.
center : array-like, optional
Point about which to rotate.
If not passed, the center of the shape will be used.
|
def nconflicts(self, var, val, assignment):
"Return the number of conflicts var=val has with other variables."
def conflict(var2):
return (var2 in assignment
and not self.constraints(var, val, var2, assignment[var2]))
return count_if(conflict, self.neighbors[var])
|
Return the number of conflicts var=val has with other variables.
|
def parse_netchop(netchop_output):
line_iterator = iter(netchop_output.decode().split("\n"))
scores = []
for line in line_iterator:
if "pos" in line and 'AA' in line and 'score' in line:
scores.append([])
if "----" not in next(line_iterator):
raise ValueError("Dashes expected")
line = next(line_iterator)
while '-------' not in line:
score = float(line.split()[3])
scores[-1].append(score)
line = next(line_iterator)
return scores
|
Parse netChop stdout.
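A minimal sketch with a hypothetical snippet shaped like netChop stdout (assuming the function above is in scope); only the fourth (score) column of each data row is read:
>>> sample = (b" pos  AA  C  score  ident\n"
...           b"--------------------------\n"
...           b"   1  M   .  0.7632  seq1\n"
...           b"   2  K   .  0.1124  seq1\n"
...           b"--------------------------\n")
>>> parse_netchop(sample)
[[0.7632, 0.1124]]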
|
def tracemessage(self, maxlen=6):
result = ""
for i, value in enumerate(self):
result += "{0}: {1}\n".format(i, get_node_repr(value))
result = result.strip("\n")
lines = result.split("\n")
if maxlen and len(lines) > maxlen:
i = int(maxlen / 2)
lines = lines[:i] + ["..."] + lines[-(maxlen - i) :]
result = "\n".join(lines)
return result
|
if maxlen > 0, the message is shortened to maxlen traces.
|
def parse_int(s):
try:
val = int(s)
except ValueError:
print_err('\nInvalid integer: {}'.format(s))
sys.exit(1)
return val
|
Parse a string as an integer.
Exit with a message on failure.
|
def get_loadbalancer(self, datacenter_id, loadbalancer_id):
response = self._perform_request(
'/datacenters/%s/loadbalancers/%s' % (
datacenter_id, loadbalancer_id))
return response
|
Retrieves a single load balancer by ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str``
|
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
self._make_prefix()
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
fromlist,tolist,flaglist = self._collect_lines(diffs)
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t',' ')
|
Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
|
def _parse_stat(stat_output):
lines = stat_output.split('\n')
stat_dict = {}
for line in lines:
split_line = line.split(':')
if len(split_line) == 2:
key = split_line[0]
val = split_line[1].strip(' ')
try:
val = float(val)
except ValueError:
val = None
stat_dict[key] = val
return stat_dict
|
Parse the string output from sox's stat function
Parameters
----------
stat_output : str
Sox output from stderr.
Returns
-------
stat_dictionary : dict
Dictionary of audio statistics.
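A minimal sketch with a hypothetical fragment of ``sox ... -n stat`` stderr output (assuming the function above is in scope):
>>> sample = ("Samples read:             35840\n"
...           "Maximum amplitude:     0.800000\n"
...           "RMS amplitude:         0.113684")
>>> stats = _parse_stat(sample)
>>> stats['Maximum amplitude']
0.8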
|
def teardown(self):
self.shut_down_instance(self.instances)
self.instances = []
try:
self.client.delete_internet_gateway(InternetGatewayId=self.internet_gateway)
self.internet_gateway = None
self.client.delete_route_table(RouteTableId=self.route_table)
self.route_table = None
for subnet in list(self.sn_ids):
self.client.delete_subnet(SubnetId=subnet)
self.sn_ids.remove(subnet)
self.client.delete_security_group(GroupId=self.sg_id)
self.sg_id = None
self.client.delete_vpc(VpcId=self.vpc_id)
self.vpc_id = None
except Exception as e:
logger.error("{}".format(e))
raise e
self.show_summary()
os.remove(self.config['state_file_path'])
|
Teardown the EC2 infrastructure.
Terminate all EC2 instances, delete all subnets, delete security group, delete VPC,
and reset all instance variables.
|
def aN(a, dim=3, dtype='int'):
if not hasattr(a, '__iter__'):
return np.array([a]*dim, dtype=dtype)
return np.array(a).astype(dtype)
|
Convert an integer or iterable list to a numpy array of length dim. This func
is used to allow other methods to take both scalars and non-numpy arrays with
flexibility.
Parameters
----------
a : number, iterable, array-like
The object to convert to numpy array
dim : integer
The length of the resulting array
dtype : string or np.dtype
Type which the resulting array should be, e.g. 'float', np.int8
Returns
-------
arr : numpy array
Resulting numpy array of length ``dim`` and type ``dtype``
Examples
--------
>>> aN(1, dim=2, dtype='float')
array([ 1., 1.])
>>> aN(1, dtype='int')
array([1, 1, 1])
>>> aN(np.array([1,2,3]), dtype='float')
array([ 1., 2., 3.])
|
def recv(self, socket_, encoding=None):
unpacker = msgpack.Unpacker(encoding=encoding)
response = socket_.recv(8)
if response == b"":
raise TensorForceError("No data received by socket.recv in call to method `recv` " +
"(listener possibly closed)!")
orig_len = int(response)
received_len = 0
while True:
data = socket_.recv(min(orig_len - received_len, self.max_msg_len))
if not data:
raise TensorForceError("No data of len {} received by socket.recv in call to method `recv`!".
format(orig_len - received_len))
data_len = len(data)
received_len += data_len
unpacker.feed(data)
if received_len == orig_len:
break
for message in unpacker:
sts = message.get("status", message.get(b"status"))
if sts:
if sts == "ok" or sts == b"ok":
return message
else:
raise TensorForceError("RemoteEnvironment server error: {}".
format(message.get("message", "not specified")))
else:
raise TensorForceError("Message without field 'status' received!")
raise TensorForceError("No message encoded in data stream (data stream had len={})".
format(orig_len))
|
Receives a message as msgpack-numpy encoded byte-string from the given socket object.
Blocks until something was received.
Args:
socket_: The python socket object to use.
encoding (str): The encoding to use for unpacking messages from the socket.
Returns: The decoded (as dict) message received.
|
def load(path_or_file, validate=True, strict=True, fmt='auto'):
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc))
if validate:
jam.validate(strict=strict)
return jam
|
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
|
def create_distributions(self):
distributions = dict()
for name in sorted(self.actions_spec):
action = self.actions_spec[name]
if self.distributions_spec is not None and name in self.distributions_spec:
kwargs = dict(action)
kwargs['scope'] = name
kwargs['summary_labels'] = self.summary_labels
distributions[name] = Distribution.from_spec(
spec=self.distributions_spec[name],
kwargs=kwargs
)
elif action['type'] == 'bool':
distributions[name] = Bernoulli(
shape=action['shape'],
scope=name,
summary_labels=self.summary_labels
)
elif action['type'] == 'int':
distributions[name] = Categorical(
shape=action['shape'],
num_actions=action['num_actions'],
scope=name,
summary_labels=self.summary_labels
)
elif action['type'] == 'float':
if 'min_value' in action:
distributions[name] = Beta(
shape=action['shape'],
min_value=action['min_value'],
max_value=action['max_value'],
scope=name,
summary_labels=self.summary_labels
)
else:
distributions[name] = Gaussian(
shape=action['shape'],
scope=name,
summary_labels=self.summary_labels
)
return distributions
|
Creates and returns the Distribution objects based on self.distributions_spec.
Returns: Dict of distributions according to self.distributions_spec.
|
def SetClipboardText(text: str) -> bool:
if ctypes.windll.user32.OpenClipboard(0):
ctypes.windll.user32.EmptyClipboard()
textByteLen = (len(text) + 1) * 2
hClipboardData = ctypes.windll.kernel32.GlobalAlloc(0, textByteLen)
hDestText = ctypes.windll.kernel32.GlobalLock(hClipboardData)
ctypes.cdll.msvcrt.wcsncpy(ctypes.c_wchar_p(hDestText), ctypes.c_wchar_p(text), textByteLen // 2)
ctypes.windll.kernel32.GlobalUnlock(hClipboardData)
ctypes.windll.user32.SetClipboardData(13, hClipboardData)
ctypes.windll.user32.CloseClipboard()
return True
return False
|
Return bool, True on success, otherwise False.
|
def sasl_mechanism(name, secure, preference = 50):
def decorator(klass):
klass._pyxmpp_sasl_secure = secure
klass._pyxmpp_sasl_preference = preference
if issubclass(klass, ClientAuthenticator):
_register_client_authenticator(klass, name)
elif issubclass(klass, ServerAuthenticator):
_register_server_authenticator(klass, name)
else:
raise TypeError("Not a ClientAuthenticator"
" or ServerAuthenticator class")
return klass
return decorator
|
Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
- `secure`: if the mechanism can be considered secure - `True`
if it can be used over a plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int`
|
def set_callbacks(self, **kwargs):
for name in self.SUPPORTED_CALLBACKS:
func = kwargs.get(name, getattr(self, name))
setattr(self, name, func)
|
Set callbacks for input events
|
def execute(self, args, kwargs):
return self.lookup_explicit(args, kwargs)(*args, **kwargs)
|
Dispatch a call. Call the first function whose type signature matches
the arguments.
|
def grid_evaluation(X, Y, f,vectorized=True):
XX = np.reshape(np.concatenate([X[..., None], Y[..., None]], axis=2), (X.size, 2), order='C')
if vectorized:
ZZ = f(XX)
else:
ZZ = np.array([f(x) for x in XX])
return np.reshape(ZZ, X.shape, order='C')
|
Evaluate function on given grid and return values in grid format
Assume X and Y are 2-dimensional arrays containing x and y coordinates,
respectively, of a two-dimensional grid, and f is a function that takes
1-d arrays with two entries. This function evaluates f on the grid points
described by X and Y and returns another 2-dimensional array of the shape
of X and Y that contains the values of f.
:param X: 2-dimensional array of x-coordinates
:param Y: 2-dimensional array of y-coordinates
:param f: function to be evaluated on grid
:param vectorized: `f` can handle arrays of inputs
:return: 2-dimensional array of values of f
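A minimal sketch on a small meshgrid (assumes ``numpy`` is imported as ``np``, as in the function above):
>>> import numpy as np
>>> X, Y = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 3))
>>> Z = grid_evaluation(X, Y, lambda P: P[:, 0] + P[:, 1], vectorized=True)
>>> Z.shape
(3, 3)
>>> float(Z[0, 0]), float(Z[-1, -1])
(0.0, 2.0)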
|
def addattachments(message, template_path):
if 'attachment' not in message:
return message, 0
message = make_message_multipart(message)
attachment_filepaths = message.get_all('attachment', failobj=[])
template_parent_dir = os.path.dirname(template_path)
for attachment_filepath in attachment_filepaths:
attachment_filepath = os.path.expanduser(attachment_filepath.strip())
if not attachment_filepath:
continue
if not os.path.isabs(attachment_filepath):
attachment_filepath = os.path.join(template_parent_dir,
attachment_filepath)
normalized_path = os.path.abspath(attachment_filepath)
if not os.path.exists(normalized_path):
print("Error: can't find attachment " + normalized_path)
sys.exit(1)
filename = os.path.basename(normalized_path)
with open(normalized_path, "rb") as attachment:
part = email.mime.application.MIMEApplication(attachment.read(),
Name=filename)
part.add_header('Content-Disposition',
'attachment; filename="{}"'.format(filename))
message.attach(part)
print(">>> attached {}".format(normalized_path))
del message['attachment']
return message, len(attachment_filepaths)
|
Add the attachments from the message from the commandline options.
|
def rehome(old, new, struct):
if old == new:
return
if isinstance(struct, list):
for item in struct:
rehome(old, new, item)
elif isinstance(struct, dict):
for key, val in struct.iteritems():
if isinstance(val, (dict, list)):
rehome(old, new, val)
elif "conf" in key:
continue
elif "orig" in key:
continue
elif "root" in key or "path" in key:
struct[key] = struct[key].replace(old, new)
|
Replace all absolute paths to "re-home" it
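An illustrative sketch of the path rewrite (the function uses ``iteritems``, so it targets Python 2; the config dict below is hypothetical):
>>> cfg = {"root": "/old/home/project",
...        "orig_root": "/old/home/project",
...        "jobs": [{"log_path": "/old/home/project/log"}]}
>>> rehome("/old/home", "/new/home", cfg)
>>> cfg["root"], cfg["jobs"][0]["log_path"], cfg["orig_root"]
('/new/home/project', '/new/home/project/log', '/old/home/project')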
|
def _convert_pagenum(self, kwargs):
for key in ('next', 'previous'):
if not kwargs.get(key):
continue
match = re.search(r'page=(?P<num>[\d]+)', kwargs[key])
if match is None and key == 'previous':
kwargs[key] = 1
continue
kwargs[key] = int(match.groupdict()['num'])
|
Convert next and previous from URLs to integers
|
def update(self, file_id, data):
self.file_id = file_id
if 'name' not in data:
raise KeyError('The file must have a name')
if 'file_data' not in data:
raise KeyError('The file must have file_data')
return self._mc_client._patch(url=self._build_path(file_id), data=data)
|
Update a file in the File Manager.
:param file_id: The unique id for the File Manager file.
:type file_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"file_data": string*
}
|
def extend(self, iterable):
if not hasattr(self, "_dict") or self._dict is None:
self._dict = {}
_dict = self._dict
current_length = len(self)
list.extend(self, iterable)
for i, obj in enumerate(islice(self, current_length, None),
current_length):
the_id = obj.id
if the_id not in _dict:
_dict[the_id] = i
else:
self = self[:current_length]
self._check(the_id)
raise ValueError("id '%s' at index %d is non-unique. "
"Is it present twice?" % (str(the_id), i))
|
extend list by appending elements from the iterable
|
def _tf_repeat(self, a, repeats):
if len(a.get_shape()) != 1:
raise AssertionError("This is not a 1D Tensor")
a = tf.expand_dims(a, -1)
a = tf.tile(a, [1, repeats])
a = self.tf_flatten(a)
return a
|
Tensorflow version of np.repeat for 1D
|
def map(self, features=None, query=None, styles=None,
bbox=[-180,-90,180,90], zoom=10, center=None,
image=None, image_bounds=None, cmap='viridis',
api_key=os.environ.get('MAPBOX_API_KEY', None), **kwargs):
try:
from IPython.display import display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a key or set the MAPBOX_API_KEY environment variable. Use outside of GBDX Notebooks requires a MapBox API key, sign up for free at https://www.mapbox.com/pricing/"
if features is None and query is not None:
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=None)
elif features is None and query is None and image is None:
print('Must provide either a list of features or a query or an image')
return
if styles is not None and not isinstance(styles, list):
styles = [styles]
geojson = {"type":"FeatureCollection", "features": features}
if center is None and features is not None:
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
elif center is None and image is not None:
try:
lon, lat = shape(image).centroid.coords[0]
except:
lon, lat = box(*image_bounds).centroid.coords[0]
else:
lat, lon = center
map_id = "map_{}".format(str(int(time.time())))
map_data = VectorGeojsonLayer(geojson, styles=styles, **kwargs)
image_layer = self._build_image_layer(image, image_bounds, cmap)
template = BaseTemplate(map_id, **{
"lat": lat,
"lon": lon,
"zoom": zoom,
"datasource": json.dumps(map_data.datasource),
"layers": json.dumps(map_data.layers),
"image_layer": image_layer,
"mbkey": api_key,
"token": 'dummy'
})
template.inject()
|
Renders a mapbox gl map from a vector service query or a list of geojson features
Args:
features (list): a list of geojson features
query (str): a VectorServices query
styles (list): a list of VectorStyles to apply to the features
bbox (list): a bounding box to query for features ([minx, miny, maxx, maxy])
zoom (int): the initial zoom level of the map
center (list): a list of [lat, lon] used to center the map
api_key (str): a valid Mapbox API key
image (dict): a CatalogImage or a ndarray
image_bounds (list): a list of bounds for image positioning
Use outside of GBDX Notebooks requires a MapBox API key, sign up for free at https://www.mapbox.com/pricing/
Pass the key using the `api_key` keyword or set an environment variable called `MAPBOX_API_KEY`
cmap (str): MatPlotLib colormap to use for rendering single band images (default: viridis)
|
def load_noise_map(noise_map_path, noise_map_hdu, pixel_scale, image, background_noise_map, exposure_time_map,
convert_noise_map_from_weight_map, convert_noise_map_from_inverse_noise_map,
noise_map_from_image_and_background_noise_map, convert_from_electrons, gain, convert_from_adus):
noise_map_options = sum([convert_noise_map_from_weight_map,
convert_noise_map_from_inverse_noise_map,
noise_map_from_image_and_background_noise_map])
if noise_map_options > 1:
raise exc.DataException('You have specified more than one method to load the noise_map map, e.g.:'
'convert_noise_map_from_weight_map | '
'convert_noise_map_from_inverse_noise_map |'
'noise_map_from_image_and_background_noise_map')
if noise_map_options == 0 and noise_map_path is not None:
return NoiseMap.from_fits_with_pixel_scale(file_path=noise_map_path, hdu=noise_map_hdu, pixel_scale=pixel_scale)
elif convert_noise_map_from_weight_map and noise_map_path is not None:
weight_map = Array.from_fits(file_path=noise_map_path, hdu=noise_map_hdu)
return NoiseMap.from_weight_map(weight_map=weight_map, pixel_scale=pixel_scale)
elif convert_noise_map_from_inverse_noise_map and noise_map_path is not None:
inverse_noise_map = Array.from_fits(file_path=noise_map_path, hdu=noise_map_hdu)
return NoiseMap.from_inverse_noise_map(inverse_noise_map=inverse_noise_map, pixel_scale=pixel_scale)
elif noise_map_from_image_and_background_noise_map:
if background_noise_map is None:
raise exc.DataException('Cannot compute the noise-map from the image and background noise_map map if a '
'background noise_map map is not supplied.')
if not (convert_from_electrons or convert_from_adus) and exposure_time_map is None:
raise exc.DataException('Cannot compute the noise-map from the image and background noise_map map if an '
'exposure-time (or exposure time map) is not supplied to convert to adus')
if convert_from_adus and gain is None:
raise exc.DataException('Cannot compute the noise-map from the image and background noise_map map if a'
'gain is not supplied to convert from adus')
return NoiseMap.from_image_and_background_noise_map(pixel_scale=pixel_scale, image=image,
background_noise_map=background_noise_map,
exposure_time_map=exposure_time_map,
convert_from_electrons=convert_from_electrons,
gain=gain, convert_from_adus=convert_from_adus)
else:
raise exc.DataException(
'A noise_map map was not loaded, specify a noise_map_path or option to compute a noise_map map.')
|
Factory for loading the noise-map from a .fits file.
This factory also includes a number of routines for converting the noise-map from other units (e.g. \
a weight map) or computing the noise-map from other unblurred_image_1d (e.g. the ccd image and background noise-map).
Parameters
----------
noise_map_path : str
The path to the noise_map .fits file containing the noise_map (e.g. '/path/to/noise_map.fits')
noise_map_hdu : int
The hdu the noise_map is contained in the .fits file specified by *noise_map_path*.
pixel_scale : float
The size of each pixel in arc seconds.
image : ndarray
The image-image, which the noise-map can be calculated using.
background_noise_map : ndarray
The background noise-map, which the noise-map can be calculated using.
exposure_time_map : ndarray
The exposure-time map, which the noise-map can be calculated using.
convert_noise_map_from_weight_map : bool
If True, the noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \
*NoiseMap.from_weight_map).
convert_noise_map_from_inverse_noise_map : bool
If True, the noise-map loaded from the .fits file is converted from an inverse noise-map to a noise-map (see \
*NoiseMap.from_inverse_noise_map).
background_noise_map_path : str
The path and filename of the .fits image containing the background noise-map.
background_noise_map_hdu : int
The hdu the background noise-map is contained in the .fits file that *background_noise_map_path* points to.
convert_background_noise_map_from_weight_map : bool
If True, the background noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \
*NoiseMap.from_weight_map).
convert_background_noise_map_from_inverse_noise_map : bool
If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a \
noise-map (see *NoiseMap.from_inverse_noise_map).
noise_map_from_image_and_background_noise_map : bool
If True, the noise-map is computed from the observed image and background noise-map \
(see NoiseMap.from_image_and_background_noise_map).
convert_from_electrons : bool
If True, the input unblurred_image_1d are in units of electrons and all converted to electrons / second using the exposure \
time map.
gain : float
The image gain, used for convert from ADUs.
convert_from_adus : bool
If True, the input unblurred_image_1d are in units of adus and all converted to electrons / second using the exposure \
time map and gain.
|
def save_npz_dict(save_list=None, name='model.npz', sess=None):
if sess is None:
raise ValueError("session is None.")
if save_list is None:
save_list = []
save_list_names = [tensor.name for tensor in save_list]
save_list_var = sess.run(save_list)
save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}
np.savez(name, **save_var_dict)
save_list_var = None
save_var_dict = None
del save_list_var
del save_var_dict
logging.info("[*] Model saved in npz_dict %s" % name)
|
Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session.
|
def ParseNolintSuppressions(filename, raw_line, linenum, error):
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'):
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
|
Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
|
def dumps(obj,
key=None,
salt='django.core.signing',
serializer=JSONSerializer,
compress=False):
data = serializer().dumps(obj)
is_compressed = False
if compress:
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
|
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
|
def get_required_query_params(self, request):
username = get_request_value(request, self.REQUIRED_PARAM_USERNAME, '')
course_id = get_request_value(request, self.REQUIRED_PARAM_COURSE_ID, '')
program_uuid = get_request_value(request, self.REQUIRED_PARAM_PROGRAM_UUID, '')
enterprise_customer_uuid = get_request_value(request, self.REQUIRED_PARAM_ENTERPRISE_CUSTOMER)
if not (username and (course_id or program_uuid) and enterprise_customer_uuid):
raise ConsentAPIRequestError(
self.get_missing_params_message([
("'username'", bool(username)),
("'enterprise_customer_uuid'", bool(enterprise_customer_uuid)),
("one of 'course_id' or 'program_uuid'", bool(course_id or program_uuid)),
])
)
return username, course_id, program_uuid, enterprise_customer_uuid
|
Gets ``username``, ``course_id``, ``program_uuid``, and ``enterprise_customer_uuid``,
which are the relevant query parameters for this API endpoint.
:param request: The request to this endpoint.
:return: The ``username``, ``course_id``, ``program_uuid``, and ``enterprise_customer_uuid`` from the request.
|
def remove_legend(ax=None):
from pylab import gca, draw
if ax is None:
ax = gca()
ax.legend_ = None
draw()
|
Remove legend for axes or gca.
See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
|
def MSTORE8(self, address, value):
if istainted(self.pc):
for taint in get_taints(self.pc):
value = taint_with(value, taint)
self._allocate(address, 1)
self._store(address, Operators.EXTRACT(value, 0, 8), 1)
|
Save byte to memory
|
def with_prefix(self, prefix, strict=False):
def decorated(func):
return EventHandler(func=func, event=self.event,
prefix=prefix, strict=strict)
return decorated
|
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
|
def GetLineWidth(line):
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
|
Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
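A minimal sketch (on Python 2, where ``unicode`` exists as the function expects); East Asian wide characters count as two columns:
>>> GetLineWidth(u'abc')
3
>>> GetLineWidth(u'\u4f60\u597d')
4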
|
def get(self, pk=None, **kwargs):
self._separate(kwargs)
return super(Resource, self).get(pk=pk, **kwargs)
|
Return one and exactly one notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even provided.
Lookups may be through a primary key, specified as a positional
argument, and/or through filters specified through keyword arguments.
If the number of results does not equal one, raise an exception.
=====API DOCS=====
Retrieve one and exactly one object.
:param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object
if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.
:returns: loaded JSON of the retrieved resource object.
:rtype: dict
=====API DOCS=====
|
def _self_referential_fk(klass_model):
for f in klass_model._meta.concrete_fields:
if f.related_model:
if issubclass(klass_model, f.related_model):
return f.attname
return None
|
Return whether this model has a self ref FK, and the name for the field
|
def search_item_by_name(self, name, token=None):
parameters = dict()
parameters['name'] = name
if token:
parameters['token'] = token
response = self.request('midas.item.searchbyname', parameters)
return response['items']
|
Return all items with the given name.
:param name: The name of the item to search by.
:type name: string
:param token: (optional) A valid token for the user in question.
:type token: None | string
:returns: A list of all items with the given name.
:rtype: list[dict]
|
def collect_files(self):
self.files = []
for bundle in self.bundles:
bundle.init_build(self, self.builder)
bundle_files = bundle.prepare()
self.files.extend(bundle_files)
return self
|
Return collected file links
:rtype: list[static_bundle.files.StaticFileResult]
|
def get_ties(G):
ties = []
dep_dict = {}
for node in G.nodes(data=True):
if 'dependencies' in node[1]:
for item in node[1]['dependencies']:
if item not in dep_dict:
dep_dict[item] = []
dep_dict[item].append(node[0])
for item in dep_dict:
if len(list(set(dep_dict[item]))) > 1:
ties.append(list(set(dep_dict[item])))
return ties
|
If you specify a target that shares a dependency with another target,
both targets need to be updated. This is because running one will resolve
the sha mismatch and sake will think that the other one doesn't have to
run. This is called a "tie". This function will find such ties.
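A minimal sketch with a hypothetical dependency graph (assumes ``networkx`` is available and nodes carry a ``dependencies`` attribute, as the function above expects):
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('build_a', dependencies=['common.c'])
>>> G.add_node('build_b', dependencies=['common.c'])
>>> G.add_node('docs', dependencies=['readme.md'])
>>> sorted(get_ties(G)[0])
['build_a', 'build_b']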
|
def write_temporal_networks_by_route_type(gtfs, extract_output_dir):
util.makedirs(extract_output_dir)
for route_type in route_types.TRANSIT_ROUTE_TYPES:
pandas_data_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=route_type)
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
out_file_name = os.path.join(extract_output_dir, tag + ".tnet")
pandas_data_frame.to_csv(out_file_name, encoding='utf-8', index=False)
|
Write temporal networks by route type to disk.
Parameters
----------
gtfs: gtfspy.GTFS
extract_output_dir: str
|
def recv_task_request_from_workers(self):
info = MPI.Status()
comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
worker_rank = info.Get_source()
logger.info("Received task request from worker:{}".format(worker_rank))
return worker_rank
|
Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id
|
def calculate(self, T, P, zs, ws, method):
if method == SIMPLE:
Cpsms = [i(T) for i in self.HeatCapacitySolids]
return mixing_simple(zs, Cpsms)
else:
raise Exception('Method not valid')
|
r'''Method to calculate heat capacity of a solid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
Cpsm : float
Molar heat capacity of the solid mixture at the given conditions, [J/mol/K]
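With the SIMPLE method the mixture value reduces to the mole-fraction-weighted average of the pure-solid heat capacities; a sketch of the same computation (not the library's exact code path):
Cpsm = sum(zi * Cp_i(T) for zi, Cp_i in zip(zs, self.HeatCapacitySolids))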
|
def _restore_buffers(obj, buffers):
if isinstance(obj, CannedObject) and obj.buffers:
for i, buf in enumerate(obj.buffers):
if buf is None:
obj.buffers[i] = buffers.pop(0)
|
Restore extracted buffers.
|
def getBookmark(self):
if self._write and self._recordCount==0:
return None
rowDict = dict(filepath=os.path.realpath(self._filename),
currentRow=self._recordCount)
return json.dumps(rowDict)
|
Gets a bookmark or anchor to the current position.
:returns: an anchor to the current position in the data. Passing this
anchor to a constructor makes the current position the first
returned record.
|
def _ExpandDirectories(filenames):
expanded = set()
for filename in filenames:
if not os.path.isdir(filename):
expanded.add(filename)
continue
for root, _, files in os.walk(filename):
for loopfile in files:
fullname = os.path.join(root, loopfile)
if fullname.startswith('.' + os.path.sep):
fullname = fullname[len('.' + os.path.sep):]
expanded.add(fullname)
filtered = []
for filename in expanded:
if os.path.splitext(filename)[1][1:] in GetAllExtensions():
filtered.append(filename)
return filtered
|
Searches a list of filenames and replaces directories in the list with
all files descending from those directories. Files with extensions not in
the valid extensions list are excluded.
Args:
filenames: A list of files or directories
Returns:
A list of all files that are members of filenames or descended from a
directory in filenames
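For example (hypothetical paths; only files whose extensions appear in GetAllExtensions(), e.g. C++ sources and headers, survive the filter):
_ExpandDirectories(['src/', 'main.cc'])
# -> ['src/foo.cc', 'src/foo.h', 'main.cc']   (order not guaranteed; results come from a set)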
|
def maximum_deck_area(self, water_plane_coef=0.88):
AD = self.beam * self.length * water_plane_coef
return AD
|
Return the maximum deck area of the ship
:param water_plane_coef: optional water plane coefficient
:return: Area of the deck
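A quick worked example with made-up dimensions (same length unit for beam and length):
ship.beam, ship.length = 32.0, 230.0   # hypothetical values in metres
ship.maximum_deck_area()               # 32 * 230 * 0.88 = 6476.8 m^2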
|
def sav_to_pandas_rpy2(input_file):
import pandas.rpy.common as com
w = com.robj.r('foreign::read.spss("%s", to.data.frame=TRUE)' % input_file)
return com.convert_robj(w)
|
Read an SPSS .sav file into a pandas DataFrame through Rpy2.
:param input_file: string, path to the .sav file
:return: pandas.DataFrame
|
def kiss_metrics(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return KissMetricsNode()
|
KISSmetrics tracking template tag.
Renders Javascript code to track page visits. You must supply
your KISSmetrics API key in the ``KISS_METRICS_API_KEY``
setting.
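A hypothetical setup sketch; only the setting name is confirmed by the docstring, the key value and the template-side {% kiss_metrics %} invocation are assumptions:
# settings.py
KISS_METRICS_API_KEY = 'your-api-key'  # hypothetical placeholder value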
|
def convergent_round(value, ndigits=0):
if sys.version_info[0] < 3:
if value < 0.0:
return -convergent_round(-value, ndigits)  # preserve ndigits when mirroring negatives
epsilon = 0.0000001
integral_part, _ = divmod(value, 1)
if abs(value - (integral_part + 0.5)) < epsilon:
if integral_part % 2.0 < epsilon:
return integral_part
else:
nearest_even = integral_part + 0.5
return math.ceil(nearest_even)
return round(value, ndigits)
|
Convergent rounding.
Round to nearest even (banker's rounding), similar to Python 3's round() method.
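Behaviour sketch for exact .5 cases (banker's rounding) under Python 2; under Python 3 the special branch is skipped and the built-in round() already behaves this way:
convergent_round(2.5)   # -> 2.0  (ties go to the even neighbour)
convergent_round(3.5)   # -> 4.0
convergent_round(2.4)   # -> 2.0  (non-tie values use ordinary rounding)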
|
async def listen(self):
while not self._shutdown:
try:
data = json.loads(await self._ws.recv())
except websockets.ConnectionClosed as error:
log.warning('Disconnected from Lavalink: {}'.format(str(error)))
for g in self._lavalink.players._players.copy().keys():
ws = self._lavalink.bot._connection._get_websocket(int(g))
await ws.voice_state(int(g), None)
self._lavalink.players.clear()
if self._shutdown:
break
if await self._attempt_reconnect():
return
log.warning('Unable to reconnect to Lavalink!')
break
op = data.get('op', None)
log.debug('Received WebSocket data {}'.format(str(data)))
if not op:
return log.debug('Received WebSocket message without op {}'.format(str(data)))
if op == 'event':
log.debug('Received event of type {}'.format(data['type']))
player = self._lavalink.players[int(data['guildId'])]
event = None
if data['type'] == 'TrackEndEvent':
event = TrackEndEvent(player, data['track'], data['reason'])
elif data['type'] == 'TrackExceptionEvent':
event = TrackExceptionEvent(player, data['track'], data['error'])
elif data['type'] == 'TrackStuckEvent':
event = TrackStuckEvent(player, data['track'], data['thresholdMs'])
if event:
await self._lavalink.dispatch_event(event)
elif op == 'playerUpdate':
await self._lavalink.update_state(data)
elif op == 'stats':
self._lavalink.stats._update(data)
await self._lavalink.dispatch_event(StatsUpdateEvent(self._lavalink.stats))
log.debug('Closing WebSocket...')
await self._ws.close()
|
Waits for payloads from the Lavalink server and processes them until shutdown.
|
def status(self, workflow_id):
self.logger.debug('Get status of workflow: ' + workflow_id)
url = '%(wf_url)s/%(wf_id)s' % {
'wf_url': self.workflows_url, 'wf_id': workflow_id
}
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json()['state']
|
Checks workflow status.
Args:
workflow_id (str): Workflow id.
Returns:
Workflow status (str).
|
def remove_tweets(self, url):
try:
del self.cache[url]
self.mark_updated()
return True
except KeyError:
return False
|
Tries to remove cached tweets for the given URL. Returns True on success, False if the URL was not cached.
|
def proxy_factory(BaseSchema, label, ProxiedClass, get_key):
def local():
key = get_key()
try:
return proxies[BaseSchema][label][key]
except KeyError:
proxies[BaseSchema][label][key] = ProxiedClass()
return proxies[BaseSchema][label][key]
return LocalProxy(local)
|
Create a proxy to a class instance stored in ``proxies``.
:param class BaseSchema: Base schema (e.g. ``StoredObject``)
:param str label: Name of class variable to set
:param class ProxiedClass: Class to get or create
:param function get_key: Extension-specific key function; may return e.g.
the current Flask request
|
def score(self, word, docid):
"Compute a score for this word on this docid."
return (math.log(1 + self.index[word][docid])
/ math.log(1 + self.documents[docid].nwords))
|
Compute a score for this word on this docid.
|
def run(self):
options = {}
if bool(self.config['use_proxy']):
options['proxies'] = {"http": self.config['proxy'], "https": self.config['proxy']}
options["url"] = self.config['url']
options["data"] = {"issues": json.dumps(map(lambda x: x.__todict__(), self.issues))}
if 'get' == self.config['method'].lower():
requests.get(**options)
else:
requests.post(**options)
|
Method executed dynamically by the framework. It makes an HTTP request to the
endpoint set in the config file, sending the issues and other data.
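The config keys this method reads (taken from the code above) are 'url', 'method', 'use_proxy' and 'proxy'; a hypothetical config could look like:
config = {
    'url': 'https://example.com/api/issues',
    'method': 'post',
    'use_proxy': False,
    'proxy': '',
}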
|
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
|
Convert the field value from the provided model to a string.
Used during model serialization.
Args:
obj: db.Model, model object
Returns:
string, the serialized field value
|
def getTotalw(self):
w = sum([field.w for field in self.fields])
return w
|
Returns the cumulative w for all the fields in the dataset
|
def lock(self, function, argument):
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
|
Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue.
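A minimal usage sketch, assuming the surrounding class follows the classic mutex recipe (i.e. it also exposes testandset() and an unlock() that pops the queue and invokes the next queued call; the class name is hypothetical):
m = mutex()
m.lock(print, 'runs immediately')   # mutex was free: acquired and called at once
m.lock(print, 'runs on unlock')     # mutex held: call is queued
m.unlock()                          # releases, dequeues and invokes the second print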
|
def show_message(self, message_str):
if self._message_handle is not None:
self._message_handle.cancel()
self._message_handle = asyncio.get_event_loop().call_later(
self._MESSAGE_DELAY_SECS, self._clear_message
)
self._message = message_str
self._update()
|
Show a temporary message.
|
def list_lans(self, datacenter_id, depth=1):
response = self._perform_request(
'/datacenters/%s/lans?depth=%s' % (
datacenter_id,
str(depth)))
return response
|
Retrieves a list of LANs available in the account.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
|
def charge(self):
try:
return self._charge
except AttributeError:
self._charge = charge_from_formula(self.formula)
return self._charge
|
Charge of the species as an integer. Computed as a property because most
species do not have a charge, so storing it eagerly would be a waste of
memory.
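The try/except AttributeError above is a lightweight caching idiom; a generic sketch of the same pattern (names are made up):
class Lazy(object):
    @property
    def value(self):
        try:
            return self._value               # cached after the first access
        except AttributeError:
            self._value = expensive_compute()    # hypothetical one-time computation
            return self._value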
|
def get(self, id):
info = self._get_droplet_info(id)
return DropletActions(self.api, self, **info)
|
Retrieve a droplet by id
Parameters
----------
id: int
droplet id
Returns
-------
droplet: DropletActions
|