id (int64, 11 to 59.9k) | original (string, 33 to 150k chars) | modified (string, 37 to 150k chars) |
---|---|---|
36,951 |
def _show_json(renderers, path: None):
if any(lambda r: r.needs_output_path for r in renderers) and not path:
raise DvcException("Output path ('-o') is required!")
result = {
renderer.filename: json.loads(renderer.as_json(path=path))
for renderer in renderers
}
if result:
ui.write(json.dumps(result, indent=4))
|
def _show_json(renderers, path: None):
if any(lambda r: r.needs_output_path for r in renderers) and not path:
raise DvcException("Output path ('-o') is required!")
result = {
renderer.filename: json.loads(renderer.as_json(path=path))
for renderer in renderers
}
if result:
ui.write_json(result)
|
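In both versions above, `any(lambda r: r.needs_output_path for r in renderers)` builds a generator of lambda objects (which are always truthy) instead of evaluating the flag, so the guard effectively checks only that `renderers` is non-empty; `any(r.needs_output_path for r in renderers)` would express the intended condition. The behavioural change in the pair is the switch from an inline `json.dumps` call to a `ui.write_json` helper. A minimal sketch of such a helper (a stand-in, not DVC's actual implementation):

import json

class _UI:
    """Stand-in for the ui object assumed by the snippets above."""

    def write(self, text):
        print(text)

    def write_json(self, data, indent=4):
        # Centralizes the serialization the original call site did inline.
        self.write(json.dumps(data, indent=indent))

ui = _UI()
ui.write_json({"plots.json": {"x": [1, 2, 3]}})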
29,805 |
def get_pipeline_deploy_groups(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
pipeline_steps = []
for step in get_pipeline_config(service, soa_dir):
if(step.get("parallel", False)):
for parallel_step in step.get("parallel"):
if(parallel_step.get("step", False)):
pipeline_steps.append(parallel_step["step"])
else:
pipeline_steps.append(step["step"])
return [step for step in pipeline_steps if is_deploy_step(step)]
|
def get_pipeline_deploy_groups(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
pipeline_steps = []
for step in get_pipeline_config(service, soa_dir):
if step.get("parallel", False):
for parallel_step in step.get("parallel"):
if(parallel_step.get("step", False)):
pipeline_steps.append(parallel_step["step"])
else:
pipeline_steps.append(step["step"])
return [step for step in pipeline_steps if is_deploy_step(step)]
|
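For illustration, the flattening logic above applied to a hypothetical pipeline config; `get_pipeline_config` and `is_deploy_step` are assumed helpers, so this sketch inlines a sample list instead:

pipeline = [
    {"step": "itest"},
    {"parallel": [{"step": "deploy.canary"}, {"command": "make docs"}]},
    {"step": "deploy.prod"},
]

pipeline_steps = []
for step in pipeline:
    if step.get("parallel", False):
        for parallel_step in step.get("parallel"):
            if parallel_step.get("step", False):
                pipeline_steps.append(parallel_step["step"])
    else:
        pipeline_steps.append(step["step"])

print(pipeline_steps)  # ['itest', 'deploy.canary', 'deploy.prod']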
49,419 |
def scan_files(dirname):
"""
Scan pair of bin/meta files and return information about it.
"""
info_list = []
for root, dirs, files in os.walk(dirname):
for file in files:
if not file.endswith('.meta'):
continue
meta_filename = Path(root) / file
bin_filename = Path(root) / file.replace('.meta', '.bin')
meta = read_meta_file(meta_filename)
num_chan = int(meta['nSavedChans'])
# when file is Noise4Sam_g0_t0.nidq.bin or Noise4Sam_g0_t0.imec0.lf.bin
# name is the first part "Noise4Sam_g0_t0"
# gtX X is the seg_index here 0
# nidq or imec0 is the device
# lf or ap is "signal_kind"
# stream_name = device + signal_kind
name = file.split('.')[0]
r = re.findall(r'_g(\d*)_t', name)
seg_index = int(r[0][0])
device = file.split('.')[1]
if 'imec' in device:
signal_kind = file.split('.')[2]
stream_name = device + '.' + signal_kind
units = 'uV'
# please note the 1e6 in gain for this uV
# metad['imroTbl'] contain two gain per channel AP and LF
# except for the last fake channel
per_channel_gain = np.ones(num_chan, dtype='float64')
if signal_kind == 'ap':
index_imroTbl = 3
elif signal_kind == 'lf':
index_imroTbl = 4
for c in range(num_chan - 1):
# the last channel don't have gain
per_channel_gain[c] = 1. / float(meta['imroTbl'][c].split(' ')[index_imroTbl])
gain_factor = float(meta['imAiRangeMax']) / 512
channel_gains = per_channel_gain * gain_factor * 1e6
else:
signal_kind = ''
stream_name = device
units = 'V'
channel_gains = np.ones(num_chan)
                # there are different kinds of channels with different gains
mn, ma, xa, dw = [int(e) for e in meta['snsMnMaXaDw'].split(sep=',')]
per_channel_gain = np.ones(num_chan, dtype='float64')
per_channel_gain[0:mn] = float(meta['niMNGain'])
per_channel_gain[mn:mn + ma] = float(meta['niMAGain'])
gain_factor = float(meta['niAiRangeMax']) / 32768
channel_gains = per_channel_gain * gain_factor
info = {}
info['name'] = name
info['meta'] = meta
info['bin_file'] = str(bin_filename)
for k in ('niSampRate', 'imSampRate'):
if k in meta:
info['sampling_rate'] = float(meta[k])
info['num_chan'] = num_chan
info['sample_length'] = int(meta['fileSizeBytes']) // 2 // num_chan
info['seg_index'] = seg_index
info['device'] = device
info['signal_kind'] = signal_kind
info['stream_name'] = stream_name
info['units'] = units
info['channel_names'] = [txt.split(';')[0] for txt in meta['snsChanMap']]
info['channel_gains'] = channel_gains
info['channel_offsets'] = np.zeros(info['num_chan'])
if signal_kind == 'ap':
channel_location = []
for e in meta['snsShankMap']:
x_pos = int(e.split(':')[1])
y_pos = int(e.split(':')[2])
channel_location.append([x_pos, y_pos])
info['channel_location'] = np.array(channel_location)
info_list.append(info)
return info_list
|
def scan_files(dirname):
"""
Scan pair of bin/meta files and return information about it.
"""
info_list = []
for root, dirs, files in os.walk(dirname):
for file in files:
if not file.endswith('.meta'):
continue
meta_filename = Path(root) / file
bin_filename = Path(root) / file.replace('.meta', '.bin')
meta = read_meta_file(meta_filename)
num_chan = int(meta['nSavedChans'])
# Example file name structure:
# Consider the filenames: `Noise4Sam_g0_t0.nidq.bin` or `Noise4Sam_g0_t0.imec0.lf.bin`
# The filenames consist of 3 or 4 parts separated by `.`
# `name` corresponds to everything before first dot, here `Noise4Sam_g0_t0`
# `seg_index` corresponds to X in `gtX`, here 0
# `device` corresponds to the second part of the filename, here `nidq` or `imec0`
# `signal_kind` corresponds to the optional third part of the filename, here `lf`. Valid values for `signal_kind` are `lf` and `ap`.
# `stream_name` is the concatenation of `device.signal_kind`
name = file.split('.')[0]
r = re.findall(r'_g(\d*)_t', name)
seg_index = int(r[0][0])
device = file.split('.')[1]
if 'imec' in device:
signal_kind = file.split('.')[2]
stream_name = device + '.' + signal_kind
units = 'uV'
# please note the 1e6 in gain for this uV
# metad['imroTbl'] contain two gain per channel AP and LF
# except for the last fake channel
per_channel_gain = np.ones(num_chan, dtype='float64')
if signal_kind == 'ap':
index_imroTbl = 3
elif signal_kind == 'lf':
index_imroTbl = 4
for c in range(num_chan - 1):
# the last channel don't have gain
per_channel_gain[c] = 1. / float(meta['imroTbl'][c].split(' ')[index_imroTbl])
gain_factor = float(meta['imAiRangeMax']) / 512
channel_gains = per_channel_gain * gain_factor * 1e6
else:
signal_kind = ''
stream_name = device
units = 'V'
channel_gains = np.ones(num_chan)
                # there are different kinds of channels with different gains
mn, ma, xa, dw = [int(e) for e in meta['snsMnMaXaDw'].split(sep=',')]
per_channel_gain = np.ones(num_chan, dtype='float64')
per_channel_gain[0:mn] = float(meta['niMNGain'])
per_channel_gain[mn:mn + ma] = float(meta['niMAGain'])
gain_factor = float(meta['niAiRangeMax']) / 32768
channel_gains = per_channel_gain * gain_factor
info = {}
info['name'] = name
info['meta'] = meta
info['bin_file'] = str(bin_filename)
for k in ('niSampRate', 'imSampRate'):
if k in meta:
info['sampling_rate'] = float(meta[k])
info['num_chan'] = num_chan
info['sample_length'] = int(meta['fileSizeBytes']) // 2 // num_chan
info['seg_index'] = seg_index
info['device'] = device
info['signal_kind'] = signal_kind
info['stream_name'] = stream_name
info['units'] = units
info['channel_names'] = [txt.split(';')[0] for txt in meta['snsChanMap']]
info['channel_gains'] = channel_gains
info['channel_offsets'] = np.zeros(info['num_chan'])
if signal_kind == 'ap':
channel_location = []
for e in meta['snsShankMap']:
x_pos = int(e.split(':')[1])
y_pos = int(e.split(':')[2])
channel_location.append([x_pos, y_pos])
info['channel_location'] = np.array(channel_location)
info_list.append(info)
return info_list
|
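A small sketch of the filename convention described in the comments above, using hypothetical SpikeGLX-style names; only the parsing is shown, reading the .meta content is omitted:

import re

for file in ["Noise4Sam_g0_t0.imec0.ap.meta", "Noise4Sam_g0_t0.nidq.meta"]:
    name = file.split('.')[0]                           # "Noise4Sam_g0_t0"
    seg_index = int(re.findall(r'_g(\d*)_t', name)[0])  # 0
    device = file.split('.')[1]                         # "imec0" or "nidq"
    signal_kind = file.split('.')[2] if 'imec' in device else ''
    stream_name = device + ('.' + signal_kind if signal_kind else '')
    print(name, seg_index, device, stream_name)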
30,850 |
def main():
global fields_to_hash, unpopulate_fields, populate_fields
fields_to_hash = frozenset([x for x in argToList(demisto.args().get('fieldsToHash', '')) if x]) # type: ignore
unpopulate_fields = frozenset([x for x in argToList(demisto.args().get('dontPopulateFields', ''))]) # type: ignore
populate_fields = frozenset([x for x in argToList(demisto.args().get('populateFields', ''))]) # type: ignore
limit = int(demisto.args().get('limit', PAGE_SIZE))
query = demisto.args().get('query', '')
offset = int(demisto.args().get('offset', 0))
indicators = find_indicators_with_limit(query, limit, offset)
entry = fileResult("indicators.json", json.dumps(indicators).encode('utf8'))
entry['Contents'] = indicators
entry['ContentsFormat'] = formats['json']
entry['HumanReadable'] = "Fetched %d indicators successfully by the query: %s" % (len(indicators), query)
return entry
|
def main():
global fields_to_hash, unpopulate_fields, populate_fields
fields_to_hash = frozenset([x for x in argToList(demisto.args().get('fieldsToHash', '')) if x]) # type: ignore
unpopulate_fields = frozenset([x for x in argToList(demisto.args().get('dontPopulateFields', ''))]) # type: ignore
populate_fields = frozenset([x for x in argToList(demisto.args().get('populateFields', ''))]) # type: ignore
limit = int(demisto.args().get('limit', PAGE_SIZE))
query = demisto.args().get('query', '')
offset = int(demisto.args().get('offset', 0))
indicators = find_indicators_with_limit(query, limit, offset)
entry = fileResult("indicators.json", json.dumps(indicators).encode('utf8'))
entry['Contents'] = indicators
entry['ContentsFormat'] = formats['json']
entry['HumanReadable'] = f'Fetched {len(indicators)} indicators successfully by the query: {query}'
return entry
|
22,308 |
def pbkdf2_bin(data, salt, iterations=COST_FACTOR, keylen=24, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` time and produces a
key of `keylen` bytes. By default SHA-256 is used as hash function,
a different hashlib `hashfunc` can be provided.
"""
hashfunc = hashfunc or HASH_FUNCTION
data = bytes(smart_str(data))
salt = bytes(smart_str(salt))
return hashlib.pbkdf2_hmac(hashfunc, data, salt, iterations, keylen)
|
def pbkdf2_bin(data, salt, iterations=COST_FACTOR, keylen=24, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` time and produces a
key of `keylen` bytes. By default SHA-256 is used as hash function,
a different hashlib `hashfunc` can be provided.
"""
hashfunc = hashfunc or HASH_FUNCTION
data = smart_str(data)
salt = bytes(smart_str(salt))
return hashlib.pbkdf2_hmac(hashfunc, data, salt, iterations, keylen)
|
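Both versions delegate to the standard library's PBKDF2. Note that `hashlib.pbkdf2_hmac` expects bytes-like `password` and `salt` arguments (passing `str` raises `TypeError`), which is what the `bytes(...)` conversions above are for. A minimal standalone call:

import hashlib

digest = hashlib.pbkdf2_hmac('sha256', b'correct horse', b'battery staple',
                             100_000, dklen=24)
print(digest.hex())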
2,075 |
def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
roc_auc_score : Compute the area under the ROC curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
|
def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
det_curve : Compute error rates for different probability thresholds
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
|
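The `Examples` section of the docstring is empty in both versions; a hedged usage sketch, assuming scikit-learn around 0.24 where `plot_det_curve` was available:

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_det_curve
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)
plot_det_curve(clf, X_test, y_test)
plt.show()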
13,781 |
def render_body(raw_body):
"""
Render raw_body to HTML.
This includes the following steps:
* Convert Markdown to HTML
* Strip non-whitelisted HTML
* Remove unbalanced HTML tags
Note that this does not prevent Markdown syntax inside a MathJax block from
being processed, which the forums JavaScript code does.
"""
rendered = markdown.markdown(raw_body)
rendered = bleach.clean(
rendered,
tags=bleach.ALLOWED_TAGS + [
"dl", "pre", "p", "br", "sup", "strike", "sub", "del", "h1", "h2", "h3", "h4", "blockquote", "dd", "dl", "dt", "kbd", "pre", "s", "hr", "img"
],
protocols=["http", "https", "ftp", "mailto"],
strip=True,
attributes={
"a": ["href", "title"],
"img": ["src", "alt", "title", "width", "height"],
}
)
# rendered = _sanitize_html(rendered)
# rendered = _remove_unpaired_tags(rendered)
return rendered
|
def render_body(raw_body):
"""
Render raw_body to HTML.
This includes the following steps:
* Convert Markdown to HTML
* Strip non-whitelisted HTML
* Remove unbalanced HTML tags
Note that this does not prevent Markdown syntax inside a MathJax block from
being processed, which the forums JavaScript code does.
"""
rendered = markdown.markdown(raw_body)
rendered = bleach.clean(
rendered,
tags=bleach.ALLOWED_TAGS + [
"dl", "pre", "p", "br", "sup", "strike", "sub", "del", "h1", "h2", "h3", "h4", "blockquote", "dd", "dl", "dt", "kbd", "pre", "s", "hr", "img"
],
protocols=bleach.ALLOWED_PROTOCOLS + ["ftp"],
strip=True,
attributes={
"a": ["href", "title"],
"img": ["src", "alt", "title", "width", "height"],
}
)
# rendered = _sanitize_html(rendered)
# rendered = _remove_unpaired_tags(rendered)
return rendered
|
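A minimal demonstration of the Markdown-then-bleach pipeline above, with shortened tag/attribute/protocol lists; the exact defaults behind `bleach.ALLOWED_TAGS` and `bleach.ALLOWED_PROTOCOLS` vary across bleach versions:

import bleach
import markdown

raw = "**bold** <script>alert(1)</script> [file](ftp://host/readme.txt)"
html = markdown.markdown(raw)
print(bleach.clean(
    html,
    tags=["p", "strong", "a"],
    attributes={"a": ["href", "title"]},
    protocols=["http", "https", "ftp", "mailto"],
    strip=True,
))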
3,193 |
def trim_function_name(function, platform):
"""Given a function value from the frame's function attribute this returns
a trimmed version that can be stored in `function_name`. This is only used
if the client did not supply a value itself already.
"""
if get_behavior_family_for_platform(platform) != 'native':
return function
if function in ('<redacted>', '<unknown>'):
return function
original_function = function
function = function.strip()
# Ensure we don't operated on objc functions
if function.startswith(('[', '+[', '-[')):
return function
# Chop off C++ trailers
while 1:
match = _cpp_trailer_re.search(function)
if match is None:
break
function = function[:match.start()].rstrip()
# Because operator<< really screws with our balancing, so let's work
# around that by replacing it with a character we do not observe in
# `split_func_tokens` or `replace_enclosed_string`.
function = function \
.replace('operator<<', u'operator⟨⟨') \
.replace('operator<', u'operator⟨') \
.replace('operator()', u'operator◯')\
.replace(' -> ', u' ⟿ ')
# Remove the arguments if there is one.
def process_args(value, start):
value = value.strip()
if value in ('anonymous namespace', 'operator'):
return '(%s)' % value
return ''
function = replace_enclosed_string(function, '(', ')', process_args)
# Resolve generic types, but special case rust which uses things like
# <Foo as Bar>::baz to denote traits.
def process_generics(value, start):
# Rust special case
if start == 0:
return '<%s>' % replace_enclosed_string(value, '<', '>', process_generics)
return '<T>'
function = replace_enclosed_string(function, '<', '>', process_generics)
tokens = split_func_tokens(function)
    # find the token which is the function name. Since we chopped off C++
    # trailers there are only two cases we care about: the token left of
    # the -> return marker which is for instance used in Swift and, if that
    # is not found, the last token in the list.
#
# ["unsigned", "int", "whatever"] -> whatever
# ["@objc", "whatever", "->", "int"] -> whatever
try:
func_token = tokens[tokens.index(u'⟿') - 1]
except ValueError:
if tokens:
func_token = tokens[-1]
else:
func_token = None
if func_token:
function = func_token.replace(u'⟨', '<') \
.replace(u'◯', '()') \
.replace(u' ⟿ ', ' -> ')
# This really should never happen
else:
function = original_function
# trim off rust markers
function = _rust_hash.sub('', function)
# trim off windows decl markers
return _windecl_hash.sub('\\1', function)
|
def trim_function_name(function, platform):
"""Given a function value from the frame's function attribute this returns
a trimmed version that can be stored in `function_name`. This is only used
if the client did not supply a value itself already.
"""
if get_behavior_family_for_platform(platform) != 'native':
return function
if function in ('<redacted>', '<unknown>'):
return function
original_function = function
function = function.strip()
# Ensure we don't operate on objc functions
if function.startswith(('[', '+[', '-[')):
return function
# Chop off C++ trailers
while 1:
match = _cpp_trailer_re.search(function)
if match is None:
break
function = function[:match.start()].rstrip()
# Because operator<< really screws with our balancing, so let's work
# around that by replacing it with a character we do not observe in
# `split_func_tokens` or `replace_enclosed_string`.
function = function \
.replace('operator<<', u'operator⟨⟨') \
.replace('operator<', u'operator⟨') \
.replace('operator()', u'operator◯')\
.replace(' -> ', u' ⟿ ')
# Remove the arguments if there is one.
def process_args(value, start):
value = value.strip()
if value in ('anonymous namespace', 'operator'):
return '(%s)' % value
return ''
function = replace_enclosed_string(function, '(', ')', process_args)
# Resolve generic types, but special case rust which uses things like
# <Foo as Bar>::baz to denote traits.
def process_generics(value, start):
# Rust special case
if start == 0:
return '<%s>' % replace_enclosed_string(value, '<', '>', process_generics)
return '<T>'
function = replace_enclosed_string(function, '<', '>', process_generics)
tokens = split_func_tokens(function)
    # find the token which is the function name. Since we chopped off C++
    # trailers there are only two cases we care about: the token left of
    # the -> return marker which is for instance used in Swift and, if that
    # is not found, the last token in the list.
#
# ["unsigned", "int", "whatever"] -> whatever
# ["@objc", "whatever", "->", "int"] -> whatever
try:
func_token = tokens[tokens.index(u'⟿') - 1]
except ValueError:
if tokens:
func_token = tokens[-1]
else:
func_token = None
if func_token:
function = func_token.replace(u'⟨', '<') \
.replace(u'◯', '()') \
.replace(u' ⟿ ', ' -> ')
# This really should never happen
else:
function = original_function
# trim off rust markers
function = _rust_hash.sub('', function)
# trim off windows decl markers
return _windecl_hash.sub('\\1', function)
|
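The sentinel-replacement trick described in the comments can be illustrated on its own; the ⟨ / ◯ / ⟿ characters temporarily stand in for tokens that would otherwise confuse bracket balancing (a sketch independent of the `_cpp_trailer_re` / `split_func_tokens` helpers):

sig = "std::ostream& operator<<(std::ostream& os, Foo const& f)"
tmp = (sig.replace('operator<<', u'operator⟨⟨')
          .replace('operator<', u'operator⟨')
          .replace('operator()', u'operator◯')
          .replace(' -> ', u' ⟿ '))
print(tmp)  # the angle brackets of operator<< no longer look like generics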
2,002 |
def _weighted_percentile(array, sample_weight, percentile=50,
interpolation="nearest"):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : ndarray of shape (n,) or (n, m)
Values to take the weighted percentile of.
sample_weight: ndarray of (n,) or (n, m)
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
    percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
interpolation : {"linear", "lower", "higher", "nearest"}, default="lower"
The interpolation method to use when the percentile lies between
data points `i` and `j`:
* `"linear"`: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`;
* `"lower"`: i`;
* `"higher"`: `j`;
* `"nearest"`: `i` or `j`, whichever is nearest (default).
.. versionadded: 0.24
Returns
-------
percentile_value : float or int if `array` of shape (n,), otherwise\
ndarray of shape (m,)
Weighted percentile.
"""
possible_interpolation = ("linear", "lower", "higher", "nearest")
if interpolation not in possible_interpolation:
raise ValueError(
f"'interpolation' should be one of "
f"{', '.join(possible_interpolation)}. Got '{interpolation}' "
f"instead."
)
if np.any(np.count_nonzero(sample_weight, axis=0) < 1):
raise ValueError(
"All weights cannot be null when computing a weighted percentile."
)
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
if (array.shape != sample_weight.shape and
array.shape[0] == sample_weight.shape[0]):
# when `sample_weight` is 1D, we repeat it for each column of `array`
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
n_rows, n_cols = array.shape
sorted_idx = np.argsort(array, axis=0)
sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0)
percentile = np.array([percentile / 100] * n_cols)
cum_weigths = stable_cumsum(sorted_weights, axis=0)
def _squeeze_arr(arr, n_dim):
return arr[0] if n_dim == 1 else arr
# Percentile can be computed with 3 different alternative:
# https://en.wikipedia.org/wiki/Percentile
# These 3 alternatives depend of the value of a parameter C. NumPy uses
# the variant where C=0 which allows to obtained a strictly monotically
# increasing function which is defined as:
# P = (x - 1) / (N - 1); x in [1, N]
# Weighted percentile change this formula by taking into account the
# weights instead of the data frequency.
# P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being
# the sum of the weights.
adjusted_percentile = (cum_weigths - sorted_weights)
with np.errstate(invalid="ignore"):
adjusted_percentile /= cum_weigths[-1] - sorted_weights
nan_mask = np.isnan(adjusted_percentile)
adjusted_percentile[nan_mask] = 1
if interpolation in ("lower", "higher", "nearest"):
percentile_idx = np.array([
np.searchsorted(adjusted_percentile[:, col], percentile[col],
side="left")
for col in range(n_cols)
])
if interpolation == "lower" and np.all(percentile < 1):
# P = 100 is a corner case for "lower"
percentile_idx -= 1
elif interpolation == "nearest" and np.all(percentile < 1):
for col in range(n_cols):
error_higher = abs(
adjusted_percentile[percentile_idx[col], col] -
percentile[col]
)
error_lower = abs(
adjusted_percentile[percentile_idx[col] - 1, col] -
percentile[col]
)
if error_higher >= error_lower:
percentile_idx[col] -= 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, n_rows - 1), axis=0,
arr=percentile_idx
)
percentile_value = array[
sorted_idx[percentile_idx, np.arange(n_cols)],
np.arange(n_cols)
]
percentile_value = _squeeze_arr(percentile_value, n_dim)
else: # interpolation == "linear"
percentile_value = np.array([
np.interp(
x=percentile[col],
xp=adjusted_percentile[:, col],
fp=array[sorted_idx[:, col], col],
)
for col in range(n_cols)
])
percentile_value = _squeeze_arr(percentile_value, n_dim)
single_sample_weight = np.count_nonzero(sample_weight, axis=0)
if np.any(single_sample_weight == 1):
# edge case where a single weight is non-null in which case the
# previous methods will fail
if not isinstance(percentile_value, Iterable):
percentile_value = _squeeze_arr(
array[np.nonzero(sample_weight)], n_dim
)
else:
percentile_value = np.array([
array[np.flatnonzero(sample_weight[:, col])[0], col]
if n_nonzero == 1 else percentile_value[col]
for col, n_nonzero in enumerate(single_sample_weight)
])
return percentile_value
|
def _weighted_percentile(array, sample_weight, percentile=50,
interpolation="nearest"):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : ndarray of shape (n,) or (n, m)
Values to take the weighted percentile of.
sample_weight: ndarray of shape (n,) or (n, m)
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
    percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
interpolation : {"linear", "lower", "higher", "nearest"}, default="lower"
The interpolation method to use when the percentile lies between
data points `i` and `j`:
* `"linear"`: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`;
* `"lower"`: i`;
* `"higher"`: `j`;
* `"nearest"`: `i` or `j`, whichever is nearest (default).
.. versionadded: 0.24
Returns
-------
percentile_value : float or int if `array` of shape (n,), otherwise\
ndarray of shape (m,)
Weighted percentile.
"""
possible_interpolation = ("linear", "lower", "higher", "nearest")
if interpolation not in possible_interpolation:
raise ValueError(
f"'interpolation' should be one of "
f"{', '.join(possible_interpolation)}. Got '{interpolation}' "
f"instead."
)
if np.any(np.count_nonzero(sample_weight, axis=0) < 1):
raise ValueError(
"All weights cannot be null when computing a weighted percentile."
)
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
if (array.shape != sample_weight.shape and
array.shape[0] == sample_weight.shape[0]):
# when `sample_weight` is 1D, we repeat it for each column of `array`
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
n_rows, n_cols = array.shape
sorted_idx = np.argsort(array, axis=0)
sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0)
percentile = np.array([percentile / 100] * n_cols)
cum_weigths = stable_cumsum(sorted_weights, axis=0)
def _squeeze_arr(arr, n_dim):
return arr[0] if n_dim == 1 else arr
# Percentile can be computed with 3 different alternative:
# https://en.wikipedia.org/wiki/Percentile
# These 3 alternatives depend of the value of a parameter C. NumPy uses
# the variant where C=0 which allows to obtained a strictly monotically
# increasing function which is defined as:
# P = (x - 1) / (N - 1); x in [1, N]
# Weighted percentile change this formula by taking into account the
# weights instead of the data frequency.
# P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being
# the sum of the weights.
adjusted_percentile = (cum_weigths - sorted_weights)
with np.errstate(invalid="ignore"):
adjusted_percentile /= cum_weigths[-1] - sorted_weights
nan_mask = np.isnan(adjusted_percentile)
adjusted_percentile[nan_mask] = 1
if interpolation in ("lower", "higher", "nearest"):
percentile_idx = np.array([
np.searchsorted(adjusted_percentile[:, col], percentile[col],
side="left")
for col in range(n_cols)
])
if interpolation == "lower" and np.all(percentile < 1):
# P = 100 is a corner case for "lower"
percentile_idx -= 1
elif interpolation == "nearest" and np.all(percentile < 1):
for col in range(n_cols):
error_higher = abs(
adjusted_percentile[percentile_idx[col], col] -
percentile[col]
)
error_lower = abs(
adjusted_percentile[percentile_idx[col] - 1, col] -
percentile[col]
)
if error_higher >= error_lower:
percentile_idx[col] -= 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, n_rows - 1), axis=0,
arr=percentile_idx
)
percentile_value = array[
sorted_idx[percentile_idx, np.arange(n_cols)],
np.arange(n_cols)
]
percentile_value = _squeeze_arr(percentile_value, n_dim)
else: # interpolation == "linear"
percentile_value = np.array([
np.interp(
x=percentile[col],
xp=adjusted_percentile[:, col],
fp=array[sorted_idx[:, col], col],
)
for col in range(n_cols)
])
percentile_value = _squeeze_arr(percentile_value, n_dim)
single_sample_weight = np.count_nonzero(sample_weight, axis=0)
if np.any(single_sample_weight == 1):
# edge case where a single weight is non-null in which case the
# previous methods will fail
if not isinstance(percentile_value, Iterable):
percentile_value = _squeeze_arr(
array[np.nonzero(sample_weight)], n_dim
)
else:
percentile_value = np.array([
array[np.flatnonzero(sample_weight[:, col])[0], col]
if n_nonzero == 1 else percentile_value[col]
for col, n_nonzero in enumerate(single_sample_weight)
])
return percentile_value
|
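A tiny numeric check of the adjusted-percentile formula described in the comments above: with unit weights, P_w = (x - w) / (S_w - w) reduces to NumPy's C=0 variant, and the "lower"/"nearest" lookup recovers the ordinary median (a standalone sketch that does not use the sklearn helpers):

import numpy as np

array = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
weights = np.ones_like(array)

order = np.argsort(array)
sorted_weights = weights[order]
cum_weights = np.cumsum(sorted_weights)
adjusted = (cum_weights - sorted_weights) / (cum_weights[-1] - sorted_weights)

idx = np.searchsorted(adjusted, 0.5, side="left")
print(array[order][idx])  # 3.0, the unweighted median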
28,075 |
def get_source_file_paths(
file_filters: List[str],
compile_commands: tu_collector.CompilationDB,
header_file_extensions=(
'.h', '.hh', '.H', '.hp', '.hxx', '.hpp', '.HPP', '.h++', '.tcc')
) -> List[str]:
"""
Returns a list of source files for existing header file otherwise returns
with the same file path expression.
"""
file_paths = []
for file_filter in file_filters:
file_paths.append(file_filter)
if os.path.exists(file_filter) and \
file_filter.endswith(header_file_extensions):
LOG.info("Get dependent source files for '%s'...", file_filter)
dependent_sources = tu_collector.get_dependent_sources(
compile_commands, file_filter)
LOG.info("Get dependent source files for '%s' done.", file_filter)
LOG.debug("Dependent source files: %s",
', '.join(dependent_sources))
file_paths.extend(dependent_sources)
return file_paths
|
def get_source_file_paths(
file_filters: List[str],
compile_commands: tu_collector.CompilationDB,
header_file_extensions=(
'.h', '.hh', '.H', '.hp', '.hxx', '.hpp', '.HPP', '.h++', '.tcc')
) -> List[str]:
"""
Returns a list of source files for existing header file otherwise returns
with the same file path expression.
"""
file_paths = set()
for file_filter in file_filters:
file_paths.append(file_filter)
if os.path.exists(file_filter) and \
file_filter.endswith(header_file_extensions):
LOG.info("Get dependent source files for '%s'...", file_filter)
dependent_sources = tu_collector.get_dependent_sources(
compile_commands, file_filter)
LOG.info("Get dependent source files for '%s' done.", file_filter)
LOG.debug("Dependent source files: %s",
', '.join(dependent_sources))
file_paths.extend(dependent_sources)
return file_paths
|
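Note that the modified version initialises `file_paths` as a set but keeps calling `append`/`extend`, which `set` does not provide; the usual set spellings are `add`/`update`, as in this small sketch:

file_paths = set()
file_paths.add("lib/foo.cpp")                     # instead of list.append
file_paths.update(["lib/foo.h", "lib/bar.cpp"])   # instead of list.extend
print(sorted(file_paths))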
42,557 |
def test_all_location_in_db(database):
"""
Test that all locations in DB deserialize to a valid Location
"""
# Query for all locations
cursor = database.conn.cursor()
locations = cursor.execute("SELECT location,seq from location")
# We deserialize, then serialize and compare the result
for location_letter, seq in locations:
deserialized_location = deserialize_location_from_db(location_letter)
assert deserialized_location.value == seq
assert Location(seq).serialize_for_db() == location_letter
location_name = deserialize_location(str(deserialized_location))
assert location_name == deserialized_location
|
def test_all_locations_in_db(database):
"""
Test that all locations in DB deserialize to a valid Location
"""
# Query for all locations
cursor = database.conn.cursor()
locations = cursor.execute("SELECT location,seq from location")
# We deserialize, then serialize and compare the result
for location_letter, seq in locations:
deserialized_location = deserialize_location_from_db(location_letter)
assert deserialized_location.value == seq
assert Location(seq).serialize_for_db() == location_letter
location_name = deserialize_location(str(deserialized_location))
assert location_name == deserialized_location
|
10,088 |
def transform_tables_representation(tbl_list):
"""Add 'public.' to names of tables where a shema identifier is absent
and add quotes to each element.
Args:
tbl_list (list): List of table names.
Returns:
tbl_list (list): Changed list.
"""
for i, table in enumerate(tbl_list):
if '.' not in table:
tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
else:
tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
return tbl_list
|
def transform_tables_representation(tbl_list):
"""Add 'public.' to names of tables where a schema identifier is absent
and add quotes to each element.
Args:
tbl_list (list): List of table names.
Returns:
tbl_list (list): Changed list.
"""
for i, table in enumerate(tbl_list):
if '.' not in table:
tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
else:
tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
return tbl_list
|
46,535 |
def pack(values: Series):
if isinstance(values, bytes): # Bytes and BytesN are already packed
return values
elif isinstance(values, Bitvector) or isinstance(values, Bitlist):
as_bytearray = [0] * ((len(values) + 7) // 8)
for i in range(len(values)):
as_bytearray[i // 8] |= values[i] << (i % 8)
return bytes(as_bytearray)
return b''.join([serialize_basic(value) for value in values])
|
def pack(values: Series):
if isinstance(values, bytes): # Bytes and BytesN are already packed
return values
elif isinstance(values, (Bitvector, Bitlist)):
as_bytearray = [0] * ((len(values) + 7) // 8)
for i in range(len(values)):
as_bytearray[i // 8] |= values[i] << (i % 8)
return bytes(as_bytearray)
return b''.join([serialize_basic(value) for value in values])
|
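The bit-packing loop above, applied to a plain list of bits for illustration; bits are packed little-endian within each byte, as in the SSZ-style Bitvector/Bitlist handling:

values = [1, 0, 1, 1, 0, 0, 0, 1, 1]              # 9 bits -> 2 bytes
as_bytearray = [0] * ((len(values) + 7) // 8)
for i in range(len(values)):
    as_bytearray[i // 8] |= values[i] << (i % 8)
print(bytes(as_bytearray).hex())                  # 8d01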
13,494 |
def pid_by_name(name):
"""pid_by_name(name) -> int list
Arguments:
name (str): Name of program.
Returns:
List of PIDs matching `name` sorted by lifetime, youngest to oldest.
Example:
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
"""
def match(p):
if p.status() == 'zombie':
return False
if p.name() == name:
return True
try:
if p.exe() == name:
return True
except Exception:
pass
return False
processes = (p for p in psutil.process_iter() if match(p))
processes = sorted(processes, key=lambda p: p.create_time())
return list(reversed([p.pid for p in processes]))
|
def pid_by_name(name):
"""pid_by_name(name) -> int list
Arguments:
name (str): Name of program.
Returns:
List of PIDs matching `name` sorted by lifetime, youngest to oldest.
Example:
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
"""
def match(p):
if p.status() == 'zombie':
return False
if p.name() == name:
return True
try:
if p.exe() == name:
return True
except Exception:
pass
return False
processes = (p for p in psutil.process_iter() if match(p))
processes = sorted(processes, key=lambda p: p.create_time())
return [p.pid for p in processes]
|
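Both versions sort by `psutil.Process.create_time()` in ascending order (oldest first); the original then reverses the list to match the docstring's "youngest to oldest" ordering, while the modified version returns the ascending order as-is. A short sketch of the sort key:

import os
import psutil

procs = sorted(psutil.process_iter(), key=lambda p: p.create_time())
youngest_first = [p.pid for p in reversed(procs)]
print(os.getpid() in youngest_first)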
45,732 |
def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {vil.shape[1]}x{vil.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"FFT: {fft_method}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the ARI(p,1) model: {ar_order}")
if type(ar_window_radius) == int:
print(f"ARI(p,1) window radius: {ar_window_radius}")
else:
print("ARI(p,1) window radius: none")
print(f"R(VIL) window radius: {r_vil_window_radius}")
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
else:
rainrate_mask = None
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
else:
r_vil_a, r_vil_b = None, None
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(vil)) else False
)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
rainrate_f = []
extrap_kwargs["return_displacement"] = True
state = {"vil_dec": vil_dec}
params = {
"apply_rainrate_mask": apply_rainrate_mask,
"mask": mask,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"rainrate": rainrate,
"rainrate_mask": rainrate_mask,
"recomp_method": recomp_method,
"r_vil_a": r_vil_a,
"r_vil_b": r_vil_b,
}
rainrate_f = nowcast_main_loop(
vil[-1, :],
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
rainrate_f, mainloop_time = rainrate_f
if measure_time:
return np.stack(rainrate_f), init_time, mainloop_time
else:
return np.stack(rainrate_f)
|
def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {vil.shape[1]}x{vil.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"FFT: {fft_method}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the ARI(p,1) model: {ar_order}")
if type(ar_window_radius) == int:
print(f"ARI(p,1) window radius: {ar_window_radius}")
else:
print("ARI(p,1) window radius: none")
print(f"R(VIL) window radius: {r_vil_window_radius}")
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
else:
rainrate_mask = None
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
else:
r_vil_a, r_vil_b = None, None
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(vil)) else False
)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
rainrate_f = []
extrap_kwargs["return_displacement"] = True
state = {"vil_dec": vil_dec}
params = {
"apply_rainrate_mask": apply_rainrate_mask,
"mask": mask,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"rainrate": rainrate,
"rainrate_mask": rainrate_mask,
"recomp_method": recomp_method,
"precip_vil_a": precip_vil_a,
"r_vil_b": r_vil_b,
}
rainrate_f = nowcast_main_loop(
vil[-1, :],
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
rainrate_f, mainloop_time = rainrate_f
if measure_time:
return np.stack(rainrate_f), init_time, mainloop_time
else:
return np.stack(rainrate_f)
|
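Note that the `params` dict in the modified version references `precip_vil_a`, a name not defined earlier in the function. A hedged usage sketch with synthetic inputs, assuming this is the ANVIL nowcast registered in pysteps; shapes follow the docstring, with `vil` of shape (ar_order+2, m, n) and `velocity` of shape (2, m, n):

import numpy as np
from pysteps import nowcasts

anvil = nowcasts.get_method("anvil")
vil = np.random.rand(4, 64, 64)       # ar_order=2 -> 4 input fields
velocity = np.zeros((2, 64, 64))      # zero advection, finite everywhere
nowcast = anvil(vil, velocity, timesteps=3)
print(nowcast.shape)                  # (3, 64, 64)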
53,889 |
def _get_sensor_angles(vis: xr.DataArray, lons: da.Array, lats: da.Array) -> tuple[xr.DataArray, xr.DataArray]:
sat_lon, sat_lat, sat_alt = get_satpos(vis)
sata, satel = get_observer_look(
sat_lon,
sat_lat,
sat_alt / 1000.0, # km
vis.attrs['start_time'],
lons, lats, 0)
satz = 90 - satel
return sata, satz
|
def _get_sensor_angles(data_arr: xr.DataArray, lons: da.Array, lats: da.Array) -> tuple[xr.DataArray, xr.DataArray]:
sat_lon, sat_lat, sat_alt = get_satpos(data_arr)
sata, satel = get_observer_look(
sat_lon,
sat_lat,
sat_alt / 1000.0, # km
data_arr.attrs['start_time'],
lons, lats, 0)
satz = 90 - satel
return sata, satz
|
43,802 |
def wires_to_edges(graph: nx.Graph) -> Dict[int, Tuple[int]]:
r"""Maps the wires of a register of qubits to corresponding edges.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
Args:
graph (nx.Graph): the graph specifying possible edges
Returns:
        Dict[int, Tuple[int]]: a mapping from wires to graph edges
"""
return {i: edge for i, edge in enumerate(graph.edges)}
|
def wires_to_edges(graph: nx.Graph) -> Dict[int, Tuple]:
r"""Maps the wires of a register of qubits to corresponding edges.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
Args:
graph (nx.Graph): the graph specifying possible edges
Returns:
        Dict[int, Tuple[int]]: a mapping from wires to graph edges
"""
return {i: edge for i, edge in enumerate(graph.edges)}
|
25,802 |
def _format_env_var(envvar, value):
if value.get("deprecated", False) or envvar in value.get(
"deprecated_envvars", tuple()
):
return click.style(
"{envvar} - DEPRECATED - {description}\n\n".format(
envvar=envvar, description=value.get("description", "")
),
fg="yellow",
)
return "{envvar} - {description}\n\n".format(
envvar=envvar, description=value.get("description", "")
)
|
def _format_env_var(envvar, value):
if value.get("deprecated", False) or envvar in value.get(
"deprecated_envvars", tuple()
):
return click.style(
"{envvar} - DEPRECATED - {description}\n\n".format(
envvar=envvar, description=value.get("description", "")
),
fg="yellow",
)
return "{} - {}\n\n".format(envvar, value.get("description", ""))
|
31,011 |
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure pack dir names are returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
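Only the docstring wording changes in this pair ('the pack dir name is returned' vs 'pack dir names are returned'). The test itself relies on a requests_mock feature worth making explicit: registering a list of response dicts makes successive calls to the same URL step through that list. A standalone sketch; the URL and payloads are invented for illustration:

import requests
import requests_mock

with requests_mock.Mocker() as m:
    # Each dict in the list answers one successive GET to this URL.
    m.get(
        'https://api.example.com/repos/acme/content/pulls/1/files',
        [{'json': [{'filename': 'Packs/Slack/pack_metadata.json'}], 'status_code': 200},
         {'json': [{'filename': 'Packs/Slack1/pack_metadata.json'}], 'status_code': 200}],
    )
    first = requests.get('https://api.example.com/repos/acme/content/pulls/1/files').json()
    second = requests.get('https://api.example.com/repos/acme/content/pulls/1/files').json()

assert first[0]['filename'].split('/')[1] == 'Slack'
assert second[0]['filename'].split('/')[1] == 'Slack1'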
12,266 |
def _tokenize_line(command):
'''
Tokenize a single line of QASM code.
Parameters
----------
command : str
one line of QASM code to be broken into "tokens".
Returns
-------
tokens : list of str
the tokens corresponding to the qasm line taken as input.
'''
# for gates without arguments
if "(" not in command:
tokens = list(chain(*[a.split() for a in command.split(",")]))
tokens = [token.strip() for token in tokens]
# for classically controlled gates
elif re.match(r"\s*if\s*\(", command):
groups = re.match(r"\s*if\s*\((.*)\)\s*(.*)\s+\((.*)\)(.*)", command)
# for classically controlled gates with arguments
if groups:
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line("{} ({}) {}".format(groups.group(2),
groups.group(3),
groups.group(4)))
tokens += tokens_gate
# for classically controlled gates without arguments
else:
groups = re.match(r"\s*if\s*\((.*)\)(.*)", command)
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line(groups.group(2))
tokens += tokens_gate
tokens = [token.strip() for token in tokens]
# for gates with arguments
else:
groups = re.match(r"(^.*?)\((.*)\)(.*)", command)
if not groups:
raise SyntaxError("QASM: Incorrect bracket formatting")
tokens = groups.group(1).split()
tokens.append("(")
tokens += groups.group(2).split(",")
tokens.append(")")
tokens += groups.group(3).split(",")
tokens = [token.strip() for token in tokens]
return tokens
|
def _tokenize_line(command):
'''
Tokenize a single line of QASM code.
Parameters
----------
command : str
one line of QASM code to be broken into "tokens".
Returns
-------
tokens : list of str
The tokens (parts) corresponding to the qasm line taken as input.
'''
# for gates without arguments
if "(" not in command:
tokens = list(chain(*[a.split() for a in command.split(",")]))
tokens = [token.strip() for token in tokens]
# for classically controlled gates
elif re.match(r"\s*if\s*\(", command):
groups = re.match(r"\s*if\s*\((.*)\)\s*(.*)\s+\((.*)\)(.*)", command)
# for classically controlled gates with arguments
if groups:
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line("{} ({}) {}".format(groups.group(2),
groups.group(3),
groups.group(4)))
tokens += tokens_gate
# for classically controlled gates without arguments
else:
groups = re.match(r"\s*if\s*\((.*)\)(.*)", command)
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line(groups.group(2))
tokens += tokens_gate
tokens = [token.strip() for token in tokens]
# for gates with arguments
else:
groups = re.match(r"(^.*?)\((.*)\)(.*)", command)
if not groups:
raise SyntaxError("QASM: Incorrect bracket formatting")
tokens = groups.group(1).split()
tokens.append("(")
tokens += groups.group(2).split(",")
tokens.append(")")
tokens += groups.group(3).split(",")
tokens = [token.strip() for token in tokens]
return tokens
|
31,700 |
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return:
"""
last_run = get_last_run(client, last_run)
excluded_ids = set(last_run.get(LAST_RUN_IDS))
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
excluded_ids,
)
incidents = []
incident: Dict[str, str] = {}
demisto.debug(f'{APP_NAME} - Started fetch with {len(last_emails)}')
current_fetch_ids = set()
for item in last_emails:
if item.message_id:
current_fetch_ids.add(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
demisto.debug(f'{APP_NAME} - ending fetch - got {len(incidents)} incidents.')
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
if last_run_time > LAST_RUN_TIME:
ids = current_fetch_ids
else:
ids = current_fetch_ids | excluded_ids
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
|
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return:
"""
last_run = get_last_run(client, last_run)
excluded_ids = set(last_run.get(LAST_RUN_IDS, []))
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
excluded_ids,
)
incidents = []
incident: Dict[str, str] = {}
demisto.debug(f'{APP_NAME} - Started fetch with {len(last_emails)}')
current_fetch_ids = set()
for item in last_emails:
if item.message_id:
current_fetch_ids.add(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
demisto.debug(f'{APP_NAME} - ending fetch - got {len(incidents)} incidents.')
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
if last_run_time > LAST_RUN_TIME:
ids = current_fetch_ids
else:
ids = current_fetch_ids | excluded_ids
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
|
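The functional difference in this pair is the default in last_run.get(LAST_RUN_IDS, []): on a first run the key is absent, and set(None) raises while set([]) yields an empty set. A tiny self-contained illustration (the key name is just a stand-in string):

LAST_RUN_IDS = 'ids'          # stand-in for the integration's constant
last_run = {}                 # first fetch: nothing stored yet

try:
    excluded_ids = set(last_run.get(LAST_RUN_IDS))         # original form
except TypeError as err:
    print('original form fails on the first run:', err)    # set(None) -> TypeError

excluded_ids = set(last_run.get(LAST_RUN_IDS, []))          # modified form
assert excluded_ids == set()                                # safe empty set instead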
30,963 |
def main():
value = demisto.args()['left']
if type(value) is list:
value = demisto.args()['left'][0]
relative_date = demisto.args()['right']
return_results(check_date(value, relative_date))
|
def main():
value = demisto.args()['left']
if isinstance(value,list):
value = demisto.args()['left'][0]
relative_date = demisto.args()['right']
return_results(check_date(value, relative_date))
|
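The change replaces type(value) is list with isinstance(value, list). Besides being the idiomatic spelling, isinstance also accepts list subclasses; a short sketch (ArgList is a made-up subclass purely for illustration):

class ArgList(list):
    """Hypothetical list subclass standing in for a wrapped argument value."""

value = ArgList(['first', 'second'])

print(type(value) is list)         # False: exact-type check misses the subclass
print(isinstance(value, list))     # True: isinstance covers subclasses too

if isinstance(value, list):
    value = value[0]
assert value == 'first'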
4,194 |
def _get_matrix_from_inverse_operator(inverse_operator, forward, method='dSPM',
lambda2=1. / 9.):
"""Get inverse matrix from an inverse operator.
Currently works only for fixed/loose orientation constraints
For loose orientation constraint, the CTFs are computed for the normal
component (pick_ori='normal').
Parameters
----------
inverse_operator : instance of InverseOperator
The inverse operator.
forward : dict
The forward operator.
method : 'MNE' | 'dSPM' | 'sLORETA'
Inverse methods (for apply_inverse).
lambda2 : float
The regularization parameter (for apply_inverse).
Returns
-------
invmat : ndarray
Inverse matrix associated with inverse operator and specified
parameters.
"""
# make sure forward and inverse operators match
_convert_forward_match_inv(forward, inverse_operator)
print('Free Orientation version.')
logger.info("Computing whole inverse operator.")
info_inv = _prepare_info(inverse_operator)
# only use channels that are good for inverse operator and forward sol
ch_names_inv = info_inv['ch_names']
n_chs_inv = len(ch_names_inv)
bads_inv = inverse_operator['info']['bads']
# indices of bad channels
ch_idx_bads = [ch_names_inv.index(ch) for ch in bads_inv]
# create identity matrix as input for inverse operator
# set elements to zero for non-selected channels
id_mat = np.eye(n_chs_inv)
# convert identity matrix to evoked data type (pretending it's an epoch)
ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.)
# apply inverse operator to identity matrix in order to get inverse matrix
# free orientation constraint not possible because apply_inverse would
# combine components
# pick_ori='normal' required because apply_inverse won't give separate
# orientations
# if ~inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
# pick_ori = 'vector'
# else:
# pick_ori = 'normal'
# check if inverse operator uses fixed source orientations
is_fixed_inv = inverse_operator['eigen_leads']['data'].shape[0] == \
inverse_operator['nsource']
# choose pick_ori according to inverse operator
if is_fixed_inv:
pick_ori = None
else:
pick_ori = 'vector'
# columns for bad channels will be zero
invmat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
method=method, pick_ori=pick_ori)
# turn source estimate into numpy array
invmat = invmat_op.data
dims = invmat.shape
# remove columns for bad channels
# take into account it may be 3D array
invmat = np.delete(invmat, ch_idx_bads, axis=len(dims) - 1)
# if 3D array, i.e. multiple values per location (fixed and loose),
# reshape into 2D array
if len(dims) == 3:
v0o1 = invmat[0, 1].copy()
v3o2 = invmat[3, 2].copy()
invmat = invmat.reshape(dims[0] * dims[1], dims[2])
# make sure that reshaping worked
assert np.array_equal(v0o1, invmat[1])
assert np.array_equal(v3o2, invmat[11])
logger.info("Dimension of Inverse Matrix: %s" % str(invmat.shape))
return invmat
|
def _get_matrix_from_inverse_operator(inverse_operator, forward, method='dSPM',
lambda2=1. / 9.):
"""Get inverse matrix from an inverse operator.
Currently works only for fixed/loose orientation constraints
For loose orientation constraint, the CTFs are computed for the normal
component (pick_ori='normal').
Parameters
----------
inverse_operator : instance of InverseOperator
The inverse operator.
forward : dict
The forward operator.
method : 'MNE' | 'dSPM' | 'sLORETA'
Inverse methods (for apply_inverse).
lambda2 : float
The regularization parameter (for apply_inverse).
Returns
-------
invmat : array, shape (n_dipoles, n_channels)
Inverse matrix associated with inverse operator and specified
parameters.
"""
# make sure forward and inverse operators match
_convert_forward_match_inv(forward, inverse_operator)
print('Free Orientation version.')
logger.info("Computing whole inverse operator.")
info_inv = _prepare_info(inverse_operator)
# only use channels that are good for inverse operator and forward sol
ch_names_inv = info_inv['ch_names']
n_chs_inv = len(ch_names_inv)
bads_inv = inverse_operator['info']['bads']
# indices of bad channels
ch_idx_bads = [ch_names_inv.index(ch) for ch in bads_inv]
# create identity matrix as input for inverse operator
# set elements to zero for non-selected channels
id_mat = np.eye(n_chs_inv)
# convert identity matrix to evoked data type (pretending it's an epoch)
ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.)
# apply inverse operator to identity matrix in order to get inverse matrix
# free orientation constraint not possible because apply_inverse would
# combine components
# pick_ori='normal' required because apply_inverse won't give separate
# orientations
# if ~inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
# pick_ori = 'vector'
# else:
# pick_ori = 'normal'
# check if inverse operator uses fixed source orientations
is_fixed_inv = inverse_operator['eigen_leads']['data'].shape[0] == \
inverse_operator['nsource']
# choose pick_ori according to inverse operator
if is_fixed_inv:
pick_ori = None
else:
pick_ori = 'vector'
# columns for bad channels will be zero
invmat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
method=method, pick_ori=pick_ori)
# turn source estimate into numpy array
invmat = invmat_op.data
dims = invmat.shape
# remove columns for bad channels
# take into account it may be 3D array
invmat = np.delete(invmat, ch_idx_bads, axis=len(dims) - 1)
# if 3D array, i.e. multiple values per location (fixed and loose),
# reshape into 2D array
if len(dims) == 3:
v0o1 = invmat[0, 1].copy()
v3o2 = invmat[3, 2].copy()
invmat = invmat.reshape(dims[0] * dims[1], dims[2])
# make sure that reshaping worked
assert np.array_equal(v0o1, invmat[1])
assert np.array_equal(v3o2, invmat[11])
logger.info("Dimension of Inverse Matrix: %s" % str(invmat.shape))
return invmat
|
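Only the documented return type changes in this pair (ndarray -> array, shape (n_dipoles, n_channels)). The reshape-plus-sanity-check at the end of the function is easy to reproduce with plain NumPy; the dimensions below are arbitrary stand-ins for (n_sources, n_orientations, n_channels):

import numpy as np

n_src, n_ori, n_ch = 5, 3, 7      # toy sizes, assumed for illustration
rng = np.random.default_rng(0)
invmat = rng.normal(size=(n_src, n_ori, n_ch))

# keep two reference rows before collapsing sources x orientations
v0o1 = invmat[0, 1].copy()
v3o2 = invmat[3, 2].copy()

invmat2d = invmat.reshape(n_src * n_ori, n_ch)

# after the reshape the orientation index varies fastest, so row k = src * n_ori + ori
assert np.array_equal(v0o1, invmat2d[0 * n_ori + 1])    # row 1, as in the asserts above
assert np.array_equal(v3o2, invmat2d[3 * n_ori + 2])    # row 11
print(invmat2d.shape)                                    # (15, 7)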
13,407 |
def test_07_verify_logs_collection_still_work_after_enrcripted_dataset_is_deleted_still_work(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_3'] = middlewared_log['output'].splitlines()[-1]
assert logs_data['middleware_log_2'] in middlewared_log['output'], str(middlewared_log['output'])
assert logs_data['middleware_log_2'] != logs_data['middleware_log_3']
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_3'] = journald_log['output'].splitlines()[-1]
assert logs_data['journald_log_2'] in journald_log['output'], str(journald_log['output'])
assert logs_data['journald_log_2'] != logs_data['journald_log_3']
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_3'] = syslog['output'].splitlines()[-1]
assert logs_data['syslog_2'] in syslog['output'], str(syslog['output'])
assert logs_data['syslog_2'] != logs_data['syslog_3']
|
def test_07_verify_logs_after_passphrase_encrypted_pool_is_deleted(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_3'] = middlewared_log['output'].splitlines()[-1]
assert logs_data['middleware_log_2'] in middlewared_log['output'], str(middlewared_log['output'])
assert logs_data['middleware_log_2'] != logs_data['middleware_log_3']
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_3'] = journald_log['output'].splitlines()[-1]
assert logs_data['journald_log_2'] in journald_log['output'], str(journald_log['output'])
assert logs_data['journald_log_2'] != logs_data['journald_log_3']
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_3'] = syslog['output'].splitlines()[-1]
assert logs_data['syslog_2'] in syslog['output'], str(syslog['output'])
assert logs_data['syslog_2'] != logs_data['syslog_3']
|
8,857 |
def test_search_rule_from_callable(mockbot):
# prepare callable
@module.search(r'hello', r'hi', r'hey', r'hello|hi')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
handler.plugin_name = 'testplugin'
# create rule from a clean callable
rule = rules.SearchRule.from_callable(mockbot.settings, handler)
assert str(rule) == '<SearchRule testplugin.handler (4)>'
# match on "Hello" twice
line = ':Foo!foo@example.com PRIVMSG #sopel :Hello, world'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'Hello' for result in results)
# match on "hi" twice
line = ':Foo!foo@example.com PRIVMSG #sopel :hi!'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hi' for result in results)
# match on "hey" once
line = ':Foo!foo@example.com PRIVMSG #sopel :hey how are you doing?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 rule must match'
assert results[0].group(0) == 'hey'
# match on "hey" once even if not at the beginning of the line
line = ':Foo!foo@example.com PRIVMSG #sopel :I say hey, can you say hey?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'The rule must match once from anywhere'
assert results[0].group(0) == 'hey'
|
def test_search_rule_from_callable(mockbot):
# prepare callable
@module.search(r'hello', r'hi', r'hey', r'hello|hi')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
handler.plugin_name = 'testplugin'
# create rule from a cleaned callable
rule = rules.SearchRule.from_callable(mockbot.settings, handler)
assert str(rule) == '<SearchRule testplugin.handler (4)>'
# match on "Hello" twice
line = ':Foo!foo@example.com PRIVMSG #sopel :Hello, world'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'Hello' for result in results)
# match on "hi" twice
line = ':Foo!foo@example.com PRIVMSG #sopel :hi!'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hi' for result in results)
# match on "hey" once
line = ':Foo!foo@example.com PRIVMSG #sopel :hey how are you doing?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 rule must match'
assert results[0].group(0) == 'hey'
# match on "hey" once even if not at the beginning of the line
line = ':Foo!foo@example.com PRIVMSG #sopel :I say hey, can you say hey?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'The rule must match once from anywhere'
assert results[0].group(0) == 'hey'
|
13,469 |
def is_lgtm_allowed(from_addr, feature, approval_field):
"""Return true if the user is allowed to approve this feature."""
user = users.User(email=from_addr)
approvers = approval_defs.get_approvers(field_id)
allowed = permissions.can_approve_feature(user, feature, approvers)
return allowed
|
def is_lgtm_allowed(from_addr, feature, approval_field):
"""Return true if the user is allowed to approve this feature."""
user = users.User(email=from_addr)
approvers = approval_defs.get_approvers(approval_field.field_id)
allowed = permissions.can_approve_feature(user, feature, approvers)
return allowed
|
8,294 |
def normalize_license(license):
""" Handles when:
* No license is passed
* Made up licenses are submitted
* Official PyPI trove classifier licenses
* Common abbreviations of licenses
"""
if license is None:
return "UNKNOWN"
if license.strip() in deprecated_classifiers:
return license.strip()
if len(license.strip()) > 20:
return "Custom"
return license.strip()
|
def normalize_license(license):
""" Handles when:
* No license is passed
* Made up licenses are submitted
* Official PyPI trove classifier licenses
* Common abbreviations of licenses
"""
if license is None:
return "UNKNOWN"
if license.strip() in classifiers:
return license.strip()
if len(license.strip()) > 20:
return "Custom"
return license.strip()
|
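The fix swaps the membership test from deprecated_classifiers to classifiers; neither set appears in this record, so the sketch below supplies a tiny stand-in set just to make the branches concrete:

# Stand-in for the module-level set of trove license classifiers (illustrative only).
classifiers = {
    "License :: OSI Approved :: MIT License",
    "License :: OSI Approved :: Apache Software License",
}

def normalize_license(license):
    if license is None:
        return "UNKNOWN"
    if license.strip() in classifiers:
        return license.strip()
    if len(license.strip()) > 20:
        return "Custom"
    return license.strip()

assert normalize_license(None) == "UNKNOWN"
assert normalize_license(" License :: OSI Approved :: MIT License ") == "License :: OSI Approved :: MIT License"
assert normalize_license("My Very Own Shiny License 2.0") == "Custom"   # made-up name, > 20 chars
assert normalize_license("MIT") == "MIT"                                # short free-form string kept as-is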
46,085 |
def convert_to_onnx(
model: torch.nn.Module,
inp_shape: Union[List, Tuple, torch.Size],
input_names: Iterable = None,
output_names: List[str] = None,
file="model.onnx",
dynamic_axes: Union[Dict[str, int], Dict[str, Dict[str, int]]] = None,
opset_version: int = 9,
do_constant_folding: bool = False,
):
torch.onnx.export(
model,
inp_shape,
file,
verbose=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=do_constant_folding,
opset_version=opset_version
)
|
def convert_to_onnx(
model: torch.nn.Module,
input_shape: Union[List, Tuple, torch.Size],
input_names: Iterable = None,
output_names: List[str] = None,
file="model.onnx",
dynamic_axes: Union[Dict[str, int], Dict[str, Dict[str, int]]] = None,
opset_version: int = 9,
do_constant_folding: bool = False,
):
torch.onnx.export(
model,
input_shape,
file,
verbose=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=do_constant_folding,
opset_version=opset_version
)
|
47,954 |
def run_pipeline(capture, model_type, model, render_fn, seq_size=16, fps=30):
pipeline = AsyncPipeline()
pipeline.add_step("Data", DataStep(capture), parallel=False)
if model_type in ('en-de', 'dummy-de'):
pipeline.add_step("Encoder", EncoderStep(model[0]), parallel=False)
pipeline.add_step("Decoder", DecoderStep(model[1], sequence_size=seq_size), parallel=False)
elif model_type == 'i3d-rgb':
pipeline.add_step("I3DRGBModelStep", I3DRGBModelStep(model[0], seq_size, 256, 224), parallel = False)
pipeline.add_step("Render", RenderStep(render_fn, fps=fps), parallel=True)
pipeline.run()
pipeline.close()
pipeline.print_statistics()
|
def run_pipeline(capture, model_type, model, render_fn, seq_size=16, fps=30):
pipeline = AsyncPipeline()
pipeline.add_step("Data", DataStep(capture), parallel=False)
if model_type in ('en-de', 'dummy-de'):
pipeline.add_step("Encoder", EncoderStep(model[0]), parallel=False)
pipeline.add_step("Decoder", DecoderStep(model[1], sequence_size=seq_size), parallel=False)
elif model_type == 'i3d-rgb':
pipeline.add_step("I3DRGB", I3DRGBModelStep(model[0], seq_size, 256, 224), parallel = False)
pipeline.add_step("Render", RenderStep(render_fn, fps=fps), parallel=True)
pipeline.run()
pipeline.close()
pipeline.print_statistics()
|
23,049 |
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of :func:`numpy.histogram`.
Follows the signature of :func:`numpy.histogram` exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,
8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
if not isinstance(a, Array):
a = from_array(a)
if not np.iterable(bins) and (range is None or bins is None):
raise ValueError(
"dask.array.histogram requires either specifying "
"bins as an iterable or specifying both a range and "
"the number of bins"
)
if weights is not None and weights.chunks != a.chunks:
raise ValueError("Input array and weights must have the same chunked structure")
if normed is not False:
raise ValueError(
"The normed= keyword argument has been deprecated. "
"Please use density instead. "
"See the numpy.histogram docstring for more information."
)
dependencies = [a]
range_refs = None
if range is not None:
try:
if len(range) != 2:
raise ValueError(
f"range must be a sequence of length 2, but got {len(range)} items"
)
except TypeError:
raise TypeError(f"expected a sequence for range, not {range}") from None
range_refs = []
for elem in range:
if isinstance(elem, Array):
if elem.shape != ():
raise ValueError(
f"Dask arrays passed in the range argument to histogram must be scalars "
f"(shape of `()`); got one with shape {elem.shape}."
)
dependencies.append(elem)
key = elem.__dask_keys__()[0]
range_refs.append(key)
else:
range_refs.append(elem)
if not np.iterable(bins):
assert not isinstance(
bins, Array
), "The number of bins cannot be a Dask array, it must be an actual value"
bin_token = bins
if len(dependencies) == 1:
# ^ `len(dependencies) == 1` here iff neither element in `range` was a Dask array.
# `mn == mx` could be an expensive implicit compute otherwise.
mn, mx = range
if mn == mx:
mn = mn - 0.5
mx = mx + 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
linspace_name = tokenize(bins, *range_refs)
linspace_dsk = {
(linspace_name, 0): (
np.linspace,
range_refs[0],
range_refs[1],
bins + 1,
)
}
linspace_graph = HighLevelGraph.from_collections(
linspace_name, linspace_dsk, dependencies=dependencies[1:]
)
# ^ TODO: dask linspace doesn't support delayed values
bins = Array(linspace_graph, linspace_name, [(bins + 1,)], dtype=float)
else:
bin_token = bins
token = tokenize(a, bin_token, range, weights, density)
if isinstance(bins, Array):
if bins.ndim != 1:
raise ValueError(f"bins must have 1 dimension, got {bins.ndim}")
dependencies.append(bins)
bins_ref = "histogram-bins-" + token
bins_dsk = {bins_ref: (np.concatenate, bins.__dask_keys__())}
bins_graph = HighLevelGraph.from_collections(
bins_ref, bins_dsk, dependencies=[bins]
)
else:
bins_ref = bins
bins_graph = None
nchunks = len(list(flatten(a.__dask_keys__())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
name = "histogram-sum-" + token
# Map the histogram to all bins
def block_hist(x, bins, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
if weights is None:
dsk = {
(name, i, 0): (block_hist, k, bins_ref, range_refs)
for i, k in enumerate(flatten(a.__dask_keys__()))
}
dtype = np.histogram([])[0].dtype
else:
dependencies.append(weights)
a_keys = flatten(a.__dask_keys__())
w_keys = flatten(weights.__dask_keys__())
dsk = {
(name, i, 0): (block_hist, k, bins_ref, range_refs, w)
for i, (k, w) in enumerate(zip(a_keys, w_keys))
}
dtype = weights.dtype
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
if bins_graph:
graph = HighLevelGraph.merge(graph, bins_graph)
mapped = Array(graph, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = asarray(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
return n, bins
|
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of :func:`numpy.histogram`.
Follows the signature of :func:`numpy.histogram` exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,
8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
if not isinstance(a, Array):
a = from_array(a)
if not np.iterable(bins) and (range is None or bins is None):
raise ValueError(
"dask.array.histogram requires either specifying "
"bins as an iterable or specifying both a range and "
"the number of bins"
)
if weights is not None and weights.chunks != a.chunks:
raise ValueError("Input array and weights must have the same chunked structure")
if normed is not False:
raise ValueError(
"The normed= keyword argument has been deprecated. "
"Please use density instead. "
"See the numpy.histogram docstring for more information."
)
dependencies = [a]
range_refs = None
if range is not None:
try:
if len(range) != 2:
raise ValueError(
f"range must be a sequence of length 2, but got {len(range)} items"
)
except TypeError:
raise TypeError(f"expected a sequence for range, not {range}") from None
range_refs = []
for elem in range:
if isinstance(elem, Array):
if elem.shape != ():
raise ValueError(
f"Dask arrays passed in the range argument to histogram must be scalars "
f"(shape of `()`); got one with shape {elem.shape}."
)
dependencies.append(elem)
key = elem.__dask_keys__()[0]
range_refs.append(key)
else:
range_refs.append(elem)
if not np.iterable(bins):
assert not isinstance(
bins, Array
), "The number of bins cannot be a Dask array, it must be an actual value"
bin_token = bins
if len(dependencies) == 1:
# ^ `len(dependencies) == 1` here iff neither element in `range` was a Dask array.
# `mn == mx` could be an expensive implicit compute otherwise.
mn, mx = range
if mn == mx:
mn = mn - 0.5
mx = mx + 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
linspace_name = tokenize(bins, *range_refs)
linspace_dsk = {
(linspace_name, 0): (
np.linspace,
range_refs[0],
range_refs[1],
bins + 1,
)
}
linspace_graph = HighLevelGraph.from_collections(
linspace_name, linspace_dsk, dependencies=dependencies[1:]
)
# ^ TODO: dask linspace doesn't support delayed values
bins = Array(linspace_graph, linspace_name, [(bins + 1,)], dtype=float)
else:
bin_token = bins
token = tokenize(a, bin_token, range, weights, density)
if isinstance(bins, Array):
if bins.ndim != 1:
raise ValueError(f"bins must have 1 dimension, got {bins.ndim}")
dependencies.append(bins)
bins_ref = "histogram-bins-" + token
bins_dsk = {bins_ref: (np.concatenate, bins.__dask_keys__())}
bins_graph = HighLevelGraph.from_collections(
bins_ref, bins_dsk, dependencies=[bins]
)
else:
bins_ref = bins
bins_graph = None
nchunks = len(list(flatten(a.__dask_keys__())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
name = "histogram-sum-" + token
# Map the histogram to all bins
def block_hist(x, bins, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
if weights is None:
dsk = {
(name, i, 0): (block_hist, k, bins_ref, range_refs)
for i, k in enumerate(flatten(a.__dask_keys__()))
}
dtype = np.histogram([])[0].dtype
else:
dependencies.append(weights)
a_keys = flatten(a.__dask_keys__())
w_keys = flatten(weights.__dask_keys__())
dsk = {
(name, i, 0): (block_hist, k, bins_ref, range_refs, w)
for i, (k, w) in enumerate(zip(a_keys, w_keys))
}
dtype = weights.dtype
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
if bins_graph is not None:
graph = HighLevelGraph.merge(graph, bins_graph)
mapped = Array(graph, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = asarray(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
return n, bins
|
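The behavioural change in this pair is `if bins_graph:` becoming `if bins_graph is not None:`. The sentinel for "no extra graph was built" is None, and truthiness cannot tell that apart from a legitimate but empty mapping-like graph. A dependency-free sketch of the distinction:

candidates = [None, {}, {('histogram-bins', 0): ('concatenate-task',)}]

for graph in candidates:
    skipped_by_truthiness = not graph          # what `if bins_graph:` tests
    skipped_by_is_none = graph is None         # what the modified code tests
    print(f"{graph!r:>45}  if-g-skips={skipped_by_truthiness}  is-None-skips={skipped_by_is_none}")

# Only the `is None` test distinguishes "no graph was built" from
# "a graph was built but happens to be empty", hence the modified check.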
32,477 |
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
analysis_type = args.get('analysis_type', 'File')
analysis_ids = argToList(args.get('analysis_id'))
indicator_name = args.get('indicator_name')
command_results = []
file_metadata = {}
for analysis_id in analysis_ids:
try:
if analysis_type == 'Endpoint':
response = intezer_api.get_url_result(f'/endpoint-analyses/{analysis_id}')
analysis_result = response.json()['result']
elif analysis_type == 'Url':
analysis = UrlAnalysis.from_analysis_id(analysis_id, api=intezer_api)
if not analysis:
command_results.append(_get_missing_url_result(analysis_id))
continue
else:
analysis_result = analysis.result()
else:
analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
if not analysis:
command_results.append(_get_missing_analysis_result(analysis_id))
continue
else:
analysis_result = analysis.result()
file_metadata = analysis.get_root_analysis().metadata
if analysis_result and analysis_type == 'Endpoint':
command_results.append(
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
elif analysis_result and analysis_type == 'Url':
command_results.append(
enrich_dbot_and_display_url_analysis_results(analysis_result, intezer_api))
elif analysis_result:
command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result, file_metadata))
except HTTPError as http_error:
if http_error.response.status_code == HTTPStatus.CONFLICT:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
command_results.append(_get_missing_analysis_result(analysis_id))
else:
raise http_error
except AnalysisIsStillRunning:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
return command_results
|
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
analysis_type = args.get('analysis_type', 'File')
analysis_ids = argToList(args.get('analysis_id'))
indicator_name = args.get('indicator_name')
command_results = []
file_metadata = {}
for analysis_id in analysis_ids:
try:
if analysis_type == 'Endpoint':
response = intezer_api.get_url_result(f'/endpoint-analyses/{analysis_id}')
analysis_result = response.json()['result']
elif analysis_type == 'Url':
analysis = UrlAnalysis.from_analysis_id(analysis_id, api=intezer_api)
if not analysis:
command_results.append(_get_missing_url_result(analysis_id))
continue
else:
analysis_result = analysis.result()
else:
analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
if not analysis:
command_results.append(_get_missing_analysis_result(analysis_id))
continue
else:
analysis_result = analysis.result()
file_metadata = analysis.get_root_analysis().metadata()
if analysis_result and analysis_type == 'Endpoint':
command_results.append(
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
elif analysis_result and analysis_type == 'Url':
command_results.append(
enrich_dbot_and_display_url_analysis_results(analysis_result, intezer_api))
elif analysis_result:
command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result, file_metadata))
except HTTPError as http_error:
if http_error.response.status_code == HTTPStatus.CONFLICT:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
command_results.append(_get_missing_analysis_result(analysis_id))
else:
raise http_error
except AnalysisIsStillRunning:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
return command_results
|
37,056 |
def barrier(self, *qargs):
"""Apply barrier to circuit.
If qargs is None, applies to all the qubits.
Args is a list of QuantumRegister or single qubits.
For QuantumRegister, applies barrier to all the qubits in that register."""
qubits = []
if not qargs: # None
for qreg in self.qregs:
for j in range(qreg.size):
qubits.append((qreg, j))
for qarg in qargs:
if isinstance(qarg, QuantumRegister):
qubits.extend([(qarg, j) for j in range(qarg.size)])
elif isinstance(qarg, list):
qubits.extend(qarg)
elif isinstance(qarg, range):
qubits.extend([i for i in qarg])
elif isinstance(qarg, slice):
qubits.extend([i for i in range(*qarg.indices(1000))])
else:
qubits.append(qarg)
return self.append(Barrier(len(qubits)), qubits, [])
|
def barrier(self, *qargs):
"""Apply barrier to circuit.
If qargs is None, applies to all the qubits.
Args is a list of QuantumRegister or single qubits.
For QuantumRegister, applies barrier to all the qubits in that register."""
qubits = []
if not qargs: # None
for qreg in self.qregs:
for j in range(qreg.size):
qubits.append((qreg, j))
for qarg in qargs:
if isinstance(qarg, QuantumRegister):
qubits.extend([(qarg, j) for j in range(qarg.size)])
elif isinstance(qarg, list):
qubits.extend(qarg)
elif isinstance(qarg, range):
qubits.extend([i for i in qarg])
elif isinstance(qarg, slice):
qubits.extend(self.qubits[qarg])
else:
qubits.append(qarg)
return self.append(Barrier(len(qubits)), qubits, [])
|
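The slice branch changes from expanding qarg.indices(1000) to simply indexing self.qubits with the slice, so the circuit's real size bounds the result. A plain-list sketch of why the two differ (qubits here is just a stand-in list, not a Qiskit register):

qubits = [f"q{i}" for i in range(5)]      # pretend circuit with 5 qubits
qarg = slice(1, None)                     # caller asked for "qubit 1 onwards"

# original approach: expand the slice against a hard-coded upper bound
old_style = [i for i in range(*qarg.indices(1000))]
print(len(old_style))                     # 999 indices, almost all of them bogus

# modified approach: let the qubit sequence itself interpret the slice
new_style = qubits[qarg]
print(new_style)                          # ['q1', 'q2', 'q3', 'q4']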
8,593 |
def test_from_linestring():
# From another linestring
line = LineString(((1.0, 2.0), (3.0, 4.0)))
copy = LineString(line)
assert copy.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
assert lgeos.GEOSGeomType(copy._geom).decode('ascii') == 'LineString'
|
def test_from_linestring():
# From another linestring
line = LineString(((1.0, 2.0), (3.0, 4.0)))
copy = LineString(line)
assert copy.coords[:] == [(1.0, 2.0), (3.0, 4.0)]
assert copy.geom_type == 'LineString'
|
32,298 |
def fix_log_forwarding(topology: Topology, issue: List) -> List[ConfigurationHygieneFix]:
"""
:param topology: `Topology` instance !no-auto-argument
:param issue: Dictionary of Hygiene issue, from a hygiene check command. Can be a list.
"""
issue_objects = hygiene_issue_dict_to_object(issue)
return HygieneRemediation.fix_log_forwarding_profile_enhanced_logging(topology, issues=issue_objects)
|
def fix_log_forwarding(topology: Topology, issue: List) -> List[ConfigurationHygieneFix]:
"""
:param topology: `Topology` instance !no-auto-argument
:param issue: Dictionary of Hygiene issue, from a hygiene check command. Can be a list.
"""
return HygieneRemediation.fix_log_forwarding_profile_enhanced_logging(topology, issues=hygiene_issue_dict_to_object(issue))
|
50,071 |
def mcsolve(H, psi0, tlist, c_ops=None, e_ops=None, ntraj=1, *,
args=None, options=None, seeds=None, target_tol=None, timeout=0):
r"""
Monte Carlo evolution of a state vector :math:`|\psi \rangle` for a
given Hamiltonian and sets of collapse operators. Options for the
underlying ODE solver are given by the Options class.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable.
System Hamiltonian as a Qobj, QobjEvo, can also be a function or list
that can be made into a Qobjevo. (See :class:`qutip.QobjEvo`'s
documentation). ``H`` can be a superoperator (liouvillian) if some
collapse operators are to be treated deterministically.
psi0 : :class:`qutip.Qobj`
Initial state vector
tlist : array_like
Times at which results are recorded.
ntraj : int
Maximum number of trajectories to run. Can be cut short if a time limit
is passed in options (per default, mcsolve will stop after 1e8 sec)::
``options.mcsolve['map_options']['timeout'] = max_sec``
Or if the target tolerance is reached, see ``target_tol``.
c_ops : ``list``
A ``list`` of collapse operators. They must be operators even if ``H``
is a superoperator.
e_ops : ``list``, [optional]
A ``list`` of operators as Qobj, QobjEvo or callable with signature of
(t, state: Qobj) for calculating expectation values. When no ``e_ops``
are given, the solver will default to save the states.
args : dict, [optional]
Arguments for time-dependent Hamiltonian and collapse operator terms.
options : SolverOptions, [optional]
Options for the evolution.
seeds : int, SeedSequence, list, [optional]
Seed for the random number generator. It can be a single seed used to
spawn seeds for each trajectory or a list of seeds, one for each
trajectory. Seeds are saved in the result; they can be reused with::
seeds=prev_result.seeds
target_tol : float, list, [optional]
Target tolerance of the evolution. The evolution will compute
trajectories until the error on the expectation values is lower than
this tolerance. The error is computed using jackknife resampling.
``target_tol`` can be an absolute tolerance, a pair of absolute and
relative tolerance, in that order. Lastly, it can be a list of pairs of
(atol, rtol) for each e_ops.
timeout : float [optional]
Maximum time for the evolution in seconds. When reached, no more
trajectories will be computed. Overwrites the option of the same name.
Returns
-------
results : :class:`qutip.solver.Result`
Object storing all results from the simulation. Which results is saved
depend on the presence of ``e_ops`` and the options used. ``collapse``
and ``photocurrent`` is available to Monte Carlo simulation results.
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
if len(c_ops) == 0:
return mesolve(H, psi0, tlist, e_ops=e_ops, args=args, options=options)
if isinstance(ntraj, list):
if isinstance(options, dict):
options = SolverOptions(**options)
options = copy(options) or SolverOptions()
options.results['keep_runs_results'] = True
max_ntraj = max(ntraj)
else:
max_ntraj = ntraj
mc = McSolver(H, c_ops, options=options)
result = mc.run(psi0, tlist=tlist, ntraj=max_ntraj, e_ops=e_ops,
seed=seeds, target_tol=target_tol, timeout=timeout)
if isinstance(ntraj, list):
result.traj_batch = ntraj
return result
|
def mcsolve(H, psi0, tlist, c_ops=None, e_ops=None, ntraj=1, *,
args=None, options=None, seeds=None, target_tol=None, timeout=0):
r"""
Monte Carlo evolution of a state vector :math:`|\psi \rangle` for a
given Hamiltonian and sets of collapse operators. Options for the
underlying ODE solver are given by the Options class.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable.
System Hamiltonian as a Qobj, QobjEvo, can also be a function or list
that can be made into a Qobjevo. (See :class:`qutip.QobjEvo`'s
documentation). ``H`` can be a superoperator (liouvillian) if some
collapse operators are to be treated deterministically.
psi0 : :class:`qutip.Qobj`
Initial state vector
tlist : array_like
Times at which results are recorded.
ntraj : int
Maximum number of trajectories to run. Can be cut short if a time limit
is passed in options (per default, mcsolve will stop after 1e8 sec)::
``options.mcsolve['map_options']['timeout'] = max_sec``
Or if the target tolerance is reached, see ``target_tol``.
c_ops : ``list``
A ``list`` of collapse operators. They must be operators even if ``H``
is a superoperator.
e_ops : ``list``, [optional]
A ``list`` of operators as Qobj, QobjEvo or callable with signature of
(t, state: Qobj) for calculating expectation values. When no ``e_ops``
are given, the solver will default to save the states.
args : dict, [optional]
Arguments for time-dependent Hamiltonian and collapse operator terms.
options : SolverOptions, [optional]
Options for the evolution.
seeds : int, SeedSequence, list, [optional]
Seed for the random number generator. It can be a single seed used to
spawn seeds for each trajectory or a list of seeds, one for each
trajectory. Seeds are saved in the result; they can be reused with::
seeds=prev_result.seeds
target_tol : float, list, [optional]
Target tolerance of the evolution. The evolution will compute
trajectories until the error on the expectation values is lower than
this tolerance. The error is computed using jackknife resampling.
``target_tol`` can be an absolute tolerance or a pair of absolute and
relative tolerance, in that order. Lastly, it can be a list of pairs of
(atol, rtol) for each e_ops.
timeout : float [optional]
Maximum time for the evolution in seconds. When reached, no more
trajectories will be computed. Overwrites the option of the same name.
Returns
-------
results : :class:`qutip.solver.Result`
Object storing all results from the simulation. Which results is saved
depend on the presence of ``e_ops`` and the options used. ``collapse``
and ``photocurrent`` is available to Monte Carlo simulation results.
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
if len(c_ops) == 0:
return mesolve(H, psi0, tlist, e_ops=e_ops, args=args, options=options)
if isinstance(ntraj, list):
if isinstance(options, dict):
options = SolverOptions(**options)
options = copy(options) or SolverOptions()
options.results['keep_runs_results'] = True
max_ntraj = max(ntraj)
else:
max_ntraj = ntraj
mc = McSolver(H, c_ops, options=options)
result = mc.run(psi0, tlist=tlist, ntraj=max_ntraj, e_ops=e_ops,
seed=seeds, target_tol=target_tol, timeout=timeout)
if isinstance(ntraj, list):
result.traj_batch = ntraj
return result
|
11,561 |
def iter_lines(proc,
retcode=0,
timeout=None,
linesize=-1,
line_timeout=None,
mode=None,
_iter_lines=_iter_lines,
):
"""Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
<plumbum.commands.ProcessExecutionError>` is raised.
:param retcode: The expected return code of this process (defaults to 0).
In order to disable exit-code validation, pass ``None``. It may also
be a tuple (or any iterable) of expected exit codes.
:param timeout: The maximal amount of time (in seconds) to allow the process to run.
``None`` means no timeout is imposed; otherwise, if the process hasn't
terminated after that many seconds, the process will be forcefully
terminated and an exception will be raised
:param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
``-1`` (default) reads until a b'\\n' is encountered.
:param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
been reached. ``None`` means no timeout is imposed.
:returns: An iterator of (out, err) line tuples.
"""
if mode is None:
mode = DEFAULT_ITER_LINES_MODE
assert mode in (BY_POSITION, BY_TYPE)
encoding = getattr(proc, "custom_encoding", None) or 'utf-8'
decode = lambda s: s.decode(encoding, errors='replace').rstrip()
_register_proc_timeout(proc, timeout)
buffers = [StringIO(), StringIO()]
for t, line in _iter_lines(proc, decode, linesize, line_timeout):
# verify that the proc hasn't timed out yet
proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
buffers[t].write(line + "\n")
if mode is BY_POSITION:
ret = [None, None]
ret[t] = line
yield tuple(ret)
elif mode is BY_TYPE:
yield (t + 1), line # 1=stdout, 2=stderr
# this will take care of checking return code and timeouts
_check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
|
def iter_lines(proc,
retcode=0,
timeout=None,
linesize=-1,
line_timeout=None,
mode=None,
_iter_lines=_iter_lines,
):
"""Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
<plumbum.commands.ProcessExecutionError>` is raised.
:param retcode: The expected return code of this process (defaults to 0).
In order to disable exit-code validation, pass ``None``. It may also
be a tuple (or any iterable) of expected exit codes.
:param timeout: The maximal amount of time (in seconds) to allow the process to run.
``None`` means no timeout is imposed; otherwise, if the process hasn't
terminated after that many seconds, the process will be forcefully
terminated and an exception will be raised
:param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
``-1`` (default) reads until a b'\\n' is encountered.
:param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
been reached. ``None`` means no timeout is imposed.
:returns: An iterator of (out, err) line tuples.
"""
if mode is None:
mode = DEFAULT_ITER_LINES_MODE
assert mode in (BY_POSITION, BY_TYPE)
encoding = getattr(proc, "custom_encoding", 'utf-8')
decode = lambda s: s.decode(encoding, errors='replace').rstrip()
_register_proc_timeout(proc, timeout)
buffers = [StringIO(), StringIO()]
for t, line in _iter_lines(proc, decode, linesize, line_timeout):
# verify that the proc hasn't timed out yet
proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
buffers[t].write(line + "\n")
if mode is BY_POSITION:
ret = [None, None]
ret[t] = line
yield tuple(ret)
elif mode is BY_TYPE:
yield (t + 1), line # 1=stdout, 2=stderr
# this will take care of checking return code and timeouts
_check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
|
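The encoding lookup changes from `getattr(proc, "custom_encoding", None) or 'utf-8'` to `getattr(proc, "custom_encoding", 'utf-8')`. These are only equivalent when the attribute is missing; if it exists but is None (or empty), the new form propagates that value instead of falling back. A small demonstration:

class ProcWithoutEncoding:
    pass

class ProcWithNoneEncoding:
    custom_encoding = None

for proc in (ProcWithoutEncoding(), ProcWithNoneEncoding()):
    via_or = getattr(proc, "custom_encoding", None) or "utf-8"    # original form
    via_default = getattr(proc, "custom_encoding", "utf-8")       # modified form
    print(type(proc).__name__, via_or, via_default)

# ProcWithoutEncoding  utf-8 utf-8   -> identical when the attribute is absent
# ProcWithNoneEncoding utf-8 None    -> the modified form would pass None on to .decode()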
23,587 |
def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
``pvlib.irradiance.dirint`` by taking into account information from a
clear sky model. It is recommended that ``ghi_clearsky`` be calculated
using the Ineichen clear sky model ``pvlib.clearsky.ineichen`` with
``perez_enhancement=True``.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
ghi_clearsky : array-like
Global horizontal irradiance from clear sky model, in W/m^2.
dni_clearsky : array-like
Direct normal irradiance from clear sky model, in W/m^2.
zenith : array-like
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2.
Notes
-----
DIRINDEX model requires time series data (i.e. one of the inputs must
be a vector of length > 2).
References
----------
[1] Perez, R., Ineichen, P., Moore, K., Kmiecik, M., Chain, C., George, R.,
& Vignola, F. (2002). A new operational model for satellite-derived
irradiances: description and validation. Solar Energy, 73(5), 307-317.
"""
dni_dirint = dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew, min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirint_clearsky = dirint(ghi_clearsky, zenith, times,
pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew,
min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirindex = dni_clearsky * dni_dirint / dni_dirint_clearsky
dni_dirindex[dni_dirindex < 0] = 0.
return dni_dirindex
|
def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
:py:func:`pvlib.irradiance.dirint` by taking into account information from a
clear sky model. It is recommended that ``ghi_clearsky`` be calculated
using the Ineichen clear sky model ``pvlib.clearsky.ineichen`` with
``perez_enhancement=True``.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
ghi_clearsky : array-like
Global horizontal irradiance from clear sky model, in W/m^2.
dni_clearsky : array-like
Direct normal irradiance from clear sky model, in W/m^2.
zenith : array-like
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2.
Notes
-----
DIRINDEX model requires time series data (i.e. one of the inputs must
be a vector of length > 2).
References
----------
[1] Perez, R., Ineichen, P., Moore, K., Kmiecik, M., Chain, C., George, R.,
& Vignola, F. (2002). A new operational model for satellite-derived
irradiances: description and validation. Solar Energy, 73(5), 307-317.
"""
dni_dirint = dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew, min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirint_clearsky = dirint(ghi_clearsky, zenith, times,
pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew,
min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirindex = dni_clearsky * dni_dirint / dni_dirint_clearsky
dni_dirindex[dni_dirindex < 0] = 0.
return dni_dirindex
|
2,978 |
def clean_interp_method(method, **kwargs):
order = kwargs.get("order")
valid = [
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"pchip",
"akima",
"spline",
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' " "instead.")
return method
|
def clean_interp_method(method, **kwargs):
order = kwargs.get("order")
valid = [
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"pchip",
"akima",
"spline",
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
return method
|
38,526 |
def rlencode(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Compress matrix by looking for identical columns.
Example usage: Convert the a full set of (row or column) indices of a
sparse matrix into compressed storage.
Acknowledgement: The code is heavily inspired by MRST's function with the
same name, however, requirements on the shape of functions are probably
somewhat different.
Parameters:
A (np.ndarray): Matrix to be compressed. Should be 2d. Compression
will be along the second axis.
Returns:
np.ndarray: The compressed array, size n x m.
np.ndarray: Number of times each row in the first output array should
be repeated to restore the original array.
See also:
rldecode
"""
comp = A[::, 0:-1] != A[::, 1::]
i = np.any(comp, axis=0)
i = np.hstack((np.argwhere(i).ravel(), (A.shape[1] - 1)))
num = np.diff(np.hstack((np.array([-1]), i)))
return A[::, i], num
|
def rlencode(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Compress matrix by looking for identical columns.
Example usage: Convert a full set of (row or column) indices of a
sparse matrix into compressed storage.
Acknowledgement: The code is heavily inspired by MRST's function with the
same name, however, requirements on the shape of functions are probably
somewhat different.
Parameters:
A (np.ndarray): Matrix to be compressed. Should be 2d. Compression
will be along the second axis.
Returns:
np.ndarray: The compressed array, size n x m.
np.ndarray: Number of times each row in the first output array should
be repeated to restore the original array.
See also:
rlencode
"""
comp = A[::, 0:-1] != A[::, 1::]
i = np.any(comp, axis=0)
i = np.hstack((np.argwhere(i).ravel(), (A.shape[1] - 1)))
num = np.diff(np.hstack((np.array([-1]), i)))
return A[::, i], num
|
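A worked example for rlencode above; the expected output follows directly from the code.

import numpy as np

A = np.array([[1, 1, 2, 2, 2, 3],
              [4, 4, 5, 5, 5, 6]])
compressed, counts = rlencode(A)
print(compressed)  # [[1 2 3]
                   #  [4 5 6]]
print(counts)      # [2 3 1]  -> repeat each kept column this many times to restore A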
52,461 |
def parse_ents(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Generate named entities in [{start: i, end: i, label: 'label'}] format.
doc (Doc): Document do parse.
RETURNS (dict): Generated entities keyed by text (original text) and ents.
"""
kb_url_format_template = options.get("kb_url_format_template", None)
ents = [
{
"start": ent.start_char,
"end": ent.end_char,
"label": ent.label_,
"kb_id": ent.kb_id_ if ent.kb_id_ else "",
"kb_url": kb_url_format_template.format(ent.kb_id_)
if kb_url_format_template
else "#",
}
for ent in doc.ents
]
if not ents:
warnings.warn(Warnings.W006)
title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
settings = get_doc_settings(doc)
return {"text": doc.text, "ents": ents, "title": title, "settings": settings}
|
def parse_ents(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Generate named entities in [{start: i, end: i, label: 'label'}] format.
doc (Doc): Document to parse.
options (Dict[str, Any]): NER-specific visualisation options.
RETURNS (dict): Generated entities keyed by text (original text) and ents.
"""
kb_url_format_template = options.get("kb_url_format_template", None)
ents = [
{
"start": ent.start_char,
"end": ent.end_char,
"label": ent.label_,
"kb_id": ent.kb_id_ if ent.kb_id_ else "",
"kb_url": kb_url_format_template.format(ent.kb_id_)
if kb_url_format_template
else "#",
}
for ent in doc.ents
]
if not ents:
warnings.warn(Warnings.W006)
title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
settings = get_doc_settings(doc)
return {"text": doc.text, "ents": ents, "title": title, "settings": settings}
|
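A hedged usage sketch for parse_ents above. It assumes spaCy is installed and that the function is called inside its displacy module, where the get_doc_settings and Warnings helpers it relies on are defined.

import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Apple is looking at buying a startup")
doc.ents = [Span(doc, 0, 1, label="ORG")]  # mark "Apple" manually

result = parse_ents(doc)
# result["ents"] -> [{'start': 0, 'end': 5, 'label': 'ORG', 'kb_id': '', 'kb_url': '#'}]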
26,660 |
def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
elif isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
elif isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
elif hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
|
def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
elif isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
if isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
elif hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
|
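A few calls illustrating the mapping performed by _get_message_attribute above; the outputs follow directly from the code.

_get_message_attribute(b'\x00\x01')   # {'DataType': 'Binary', 'BinaryValue': b'\x00\x01'}
_get_message_attribute('hello')       # {'DataType': 'String', 'StringValue': 'hello'}
_get_message_attribute(3.14)          # {'DataType': 'Number', 'StringValue': '3.14'}
_get_message_attribute(['a', 'b'])    # {'DataType': 'String.Array', 'StringValue': '["a", "b"]'}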
58,415 |
def benchmark_pprint(runner):
"""Moved from `python -m pprint`."""
from pprint import PrettyPrinter
printable = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100_000
p = PrettyPrinter()
runner.bench_func('_safe_repr', p._safe_repr, printable, {}, None, 0)
runner.bench_func('pformat', p.pformat, printable)
|
def benchmark_pprint(runner):
"""Moved from `python -m pprint`."""
from pprint import PrettyPrinter
printable = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100_000
p = PrettyPrinter()
runner.bench_func('pprint_safe_repr', p._safe_repr, printable, {}, None, 0)
runner.bench_func('pprint_pformat', p.pformat, printable)
|
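One possible way to drive benchmark_pprint above, assuming a pyperf-style runner; the harness that normally supplies runner is not shown in this record, so this is only an assumption.

import pyperf  # assumed dependency

if __name__ == "__main__":
    runner = pyperf.Runner()
    benchmark_pprint(runner)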
7,080 |
def extract_resource(src: Path, tgt: Path, is_tutorial: bool = False) -> None:
"""Extract src into tgt.
NOTE: src can be a dir or a file.
"""
LOG.info(f"Extracting {src.relative_to(RESOURCE_DIR)} to {tgt}")
if tgt.exists() and is_tutorial:
# target exists, back up the old copy
_backup(tgt)
# create the target directory
tgt.parent.mkdir(parents=True, exist_ok=True)
# NOTE: shutil interfaces don't fully support Path objects at all
# python versions
if src.is_dir():
shutil.copytree(str(src), str(tgt))
else:
shutil.copyfile(str(src), str(tgt))
|
def extract_resource(src: Path, tgt: Path, is_tutorial: bool = False) -> None:
"""Extract src into tgt.
NOTE: src can be a dir or a file.
"""
LOG.info(f"Extracting {src.relative_to(RESOURCE_DIR)} to {tgt}")
if is_tutorial and tgt.exists():
# target exists, back up the old copy
_backup(tgt)
# create the target directory
tgt.parent.mkdir(parents=True, exist_ok=True)
# NOTE: shutil interfaces don't fully support Path objects at all
# python versions
if src.is_dir():
shutil.copytree(str(src), str(tgt))
else:
shutil.copyfile(str(src), str(tgt))
|
40,053 |
def find_op_code_sequence(pattern: list, instruction_list: list):
"""
Returns all indices in instruction_list that point to instruction sequences following a pattern
:param pattern: The pattern to look for.
Example: [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies the pattern
:param instruction_list: List of instructions to look in
:return: Indices to the instruction sequences
"""
for i in range(0, len(instruction_list) - len(pattern) + 1):
if is_sequence_match(pattern, instruction_list, i):
yield i
|
def find_op_code_sequence(pattern: List[List[str]], instruction_list: List[str]) -> int:
"""
Returns all indices in instruction_list that point to instruction sequences following a pattern
:param pattern: The pattern to look for.
Example: [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies the pattern
:param instruction_list: List of instructions to look in
:return: Indices to the instruction sequences
"""
for i in range(0, len(instruction_list) - len(pattern) + 1):
if is_sequence_match(pattern, instruction_list, i):
yield i
|
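The helper is_sequence_match is not shown in this record, so the sketch below includes a hypothetical stand-in purely to make the generator runnable; the real implementation may differ.

from typing import List

def is_sequence_match(pattern: List[List[str]], instruction_list: List[str], index: int) -> bool:
    # Hypothetical stand-in: each pattern slot lists the opcodes accepted at that position.
    return all(instruction_list[index + offset] in allowed
               for offset, allowed in enumerate(pattern))

instructions = ["PUSH1", "EQ", "STOP"]
print(list(find_op_code_sequence([["PUSH1", "PUSH2"], ["EQ"]], instructions)))  # [0]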
10,832 |
def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
    It uses high-level type info to determine if the function does not need NRT.
Such a function does not:
- return array object;
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
    the lifetime of any refcounted objects beyond the lifetime of the
function.
The rewrite performs inplace.
    If a rewrite has happened, this function returns True; otherwise it returns False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
|
def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
    It uses high-level type info to determine if the function does not need NRT.
Such a function does not:
- return array object;
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
    the lifetime of any refcounted objects beyond the lifetime of the
function.
The rewrite is performed in place.
    If a rewrite has happened, this function returns True; otherwise it returns False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
|
7,127 |
def guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of `image`. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
|
def _guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of `image`. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
|
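A few concrete calls showing the return values of the helper above (using the renamed _guess_spatial_dimensions from the second version); the outputs follow directly from the code.

import numpy as np

_guess_spatial_dimensions(np.zeros((64, 64)))         # 2: grayscale image
_guess_spatial_dimensions(np.zeros((64, 64, 5)))      # 3: 3D volume with 5 planes
_guess_spatial_dimensions(np.zeros((64, 64, 3)))      # None: 2D RGB or 3D volume, ambiguous
_guess_spatial_dimensions(np.zeros((10, 64, 64, 3)))  # 3: 3D multichannel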
3,351 |
def parse_arithmetic(
equation: str, max_operators: Optional[int] = None
) -> Tuple[Operation, List[str], List[str]]:
"""Given a string equation try to parse it into a set of Operations"""
try:
tree = arithmetic_grammar.parse(equation)
except ParseError:
raise ArithmeticParseError(
"Unable to parse your equation, make sure it is well formed arithmetic"
)
visitor = ArithmeticVisitor(max_operators)
result = visitor.visit(tree)
if len(visitor.fields) > 0 and len(visitor.functions) > 0:
raise ArithmeticValidationError("Cannot mix functions and fields in arithmetic")
if visitor.terms == 1:
raise ArithmeticValidationError("Need at least 2 terms to do math")
return result, list(visitor.fields), list(visitor.functions)
|
def parse_arithmetic(
equation: str, max_operators: Optional[int] = None
) -> Tuple[Operation, List[str], List[str]]:
"""Given a string equation try to parse it into a set of Operations"""
try:
tree = arithmetic_grammar.parse(equation)
except ParseError:
raise ArithmeticParseError(
"Unable to parse your equation, make sure it is well formed arithmetic"
)
visitor = ArithmeticVisitor(max_operators)
result = visitor.visit(tree)
if len(visitor.fields) > 0 and len(visitor.functions) > 0:
raise ArithmeticValidationError("Cannot mix functions and fields in arithmetic")
if visitor.terms == 1:
raise ArithmeticValidationError("Arithmetic expression must contain at least 2 terms")
return result, list(visitor.fields), list(visitor.functions)
|
11,621 |
def install_client(master, client, extra_args=(), user=None,
password=None, unattended=True, stdin_text=None):
client.collect_log(paths.IPACLIENT_INSTALL_LOG)
apply_common_fixes(client)
allow_sync_ptr(master)
# Now, for the situations where a client resides in a different subnet from
# master, we need to explicitly tell master to create a reverse zone for
# the client and enable dynamic updates for this zone.
zone, error = prepare_reverse_zone(master, client.ip)
if not error:
master.run_command(["ipa", "dnszone-mod", zone,
"--dynamic-update=TRUE"])
if user is None:
user = client.config.admin_name
if password is None:
password = client.config.admin_password
args = [
'ipa-client-install',
'--domain', client.domain.name,
'--realm', client.domain.realm,
'-p', user,
'-w', password,
'--server', master.hostname
]
if unattended:
args.append('-U')
result = client.run_command(args + list(extra_args), stdin_text=stdin_text)
setup_sssd_debugging(client)
kinit_admin(client)
return result
|
def install_client(master, client, extra_args=[], user=None,
password=None, unattended=True, stdin_text=None):
client.collect_log(paths.IPACLIENT_INSTALL_LOG)
apply_common_fixes(client)
allow_sync_ptr(master)
# Now, for the situations where a client resides in a different subnet from
# master, we need to explicitly tell master to create a reverse zone for
# the client and enable dynamic updates for this zone.
zone, error = prepare_reverse_zone(master, client.ip)
if not error:
master.run_command(["ipa", "dnszone-mod", zone,
"--dynamic-update=TRUE"])
if user is None:
user = client.config.admin_name
if password is None:
password = client.config.admin_password
args = [
'ipa-client-install',
'--domain', client.domain.name,
'--realm', client.domain.realm,
'-p', user,
'-w', password,
'--server', master.hostname
]
if unattended:
args.append('-U')
result = client.run_command(args + list(extra_args), stdin_text=stdin_text)
setup_sssd_debugging(client)
kinit_admin(client)
return result
|
30,357 |
def deploy_action(handler):
kwargs = {}
kwargs["run"] = True
for key, value in demisto.args().items():
kwargs[key] = value
kwargs["get_results"] = True if kwargs.get('get_results') == 'True' else False
callbacks = {}
callbacks['PreAddAction'] = handle_cgs
kwargs['callbacks'] = callbacks # type: ignore
kwargs['action_options'] = ['or'] # type: ignore
if demisto.get(demisto.args(), 'action_options'):
kwargs['action_options'] = demisto.args()['action_options'].split(',')
if demisto.get(demisto.args(), 'action_filters'):
kwargs['action_filters'] = demisto.args()['action_filters'].split(';')
if demisto.get(demisto.args(), 'action_filters_groups'):
kwargs['cg_names'] = demisto.args()['action_filters_groups'].split(',')
# Building the package query
package = demisto.args()['package']
package_with_args = [package]
formatted_args = ''
if demisto.args().get('package_args'):
package_args = demisto.args().get('package_args', '').split(",")
for i in range(0, len(package_args)):
formatted_args = formatted_args + '$' + str(i + 1) + '=' + package_args[i] + ','
formatted_args = formatted_args[:-1]
replace_str = get_sensor_variable(get_object(handler, u'package', package).get('package_spec'))
sensor_var = demisto.args().get('sensor_variables')
if replace_str is None and sensor_var:
return create_error_entry("Package \"" + package + "\" does not have a sensor variable.")
if replace_str and sensor_var is None:
return create_error_entry("Package \"" + package + "\" requires a sensor variable.")
if sensor_var:
sensor_vars = demisto.args().get('sensor_variables', '').split(";")
package_with_args = []
if formatted_args != '':
formatted_args += ','
for var in sensor_vars:
package_with_args.append(package + '{' + formatted_args + replace_str + '=' + var + '}')
elif formatted_args != '':
package_with_args = [package + '{' + formatted_args + '}']
response = []
for pack in package_with_args:
kwargs['package'] = pack
LOG("deploying Tanium package %s" % pack)
response.append(handler.deploy_action(**kwargs))
ec = { # type: ignore
'Tanium.SavedActions(val.Id && val.Id == obj.Id)': [],
'Tanium.Actions(val.id && val.id == obj.id)': []
}
contents = []
tbl = []
for res in response:
ec['Tanium.SavedActions(val.Id && val.Id == obj.Id)'].append({
'Name': res['saved_action_object'].name,
'Id': res['saved_action_object'].id
})
parsed = parse_deploy_action_raw_resp(handler, res)
ec['Tanium.Actions(val.id && val.id == obj.id)'] += filter_list([parsed['action_object']],
['name', 'id', 'status', 'start_time',
'approver', 'creation_time', 'package_spec'])
contents.append(parsed)
tbl.append({
'Action ID': parsed['action_object']['id'],
'Saved Action ID': parsed['saved_action_object']['id'],
'Name': parsed['action_object']['name'],
'Package Name': parsed['package_object']['name'],
'Command': parsed['package_object']['command']
})
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Tanium Deployed Actions', tbl) if tbl else 'No results were found',
'EntryContext': ec
}
|
def deploy_action(handler):
kwargs = {}
kwargs["run"] = True
for key, value in demisto.args().items():
kwargs[key] = value
kwargs["get_results"] = True if kwargs.get('get_results').lower() == 'true' else False
callbacks = {}
callbacks['PreAddAction'] = handle_cgs
kwargs['callbacks'] = callbacks # type: ignore
kwargs['action_options'] = ['or'] # type: ignore
if demisto.get(demisto.args(), 'action_options'):
kwargs['action_options'] = demisto.args()['action_options'].split(',')
if demisto.get(demisto.args(), 'action_filters'):
kwargs['action_filters'] = demisto.args()['action_filters'].split(';')
if demisto.get(demisto.args(), 'action_filters_groups'):
kwargs['cg_names'] = demisto.args()['action_filters_groups'].split(',')
# Building the package query
package = demisto.args()['package']
package_with_args = [package]
formatted_args = ''
if demisto.args().get('package_args'):
package_args = demisto.args().get('package_args', '').split(",")
for i in range(0, len(package_args)):
formatted_args = formatted_args + '$' + str(i + 1) + '=' + package_args[i] + ','
formatted_args = formatted_args[:-1]
replace_str = get_sensor_variable(get_object(handler, u'package', package).get('package_spec'))
sensor_var = demisto.args().get('sensor_variables')
if replace_str is None and sensor_var:
return create_error_entry("Package \"" + package + "\" does not have a sensor variable.")
if replace_str and sensor_var is None:
return create_error_entry("Package \"" + package + "\" requires a sensor variable.")
if sensor_var:
sensor_vars = demisto.args().get('sensor_variables', '').split(";")
package_with_args = []
if formatted_args != '':
formatted_args += ','
for var in sensor_vars:
package_with_args.append(package + '{' + formatted_args + replace_str + '=' + var + '}')
elif formatted_args != '':
package_with_args = [package + '{' + formatted_args + '}']
response = []
for pack in package_with_args:
kwargs['package'] = pack
LOG("deploying Tanium package %s" % pack)
response.append(handler.deploy_action(**kwargs))
ec = { # type: ignore
'Tanium.SavedActions(val.Id && val.Id == obj.Id)': [],
'Tanium.Actions(val.id && val.id == obj.id)': []
}
contents = []
tbl = []
for res in response:
ec['Tanium.SavedActions(val.Id && val.Id == obj.Id)'].append({
'Name': res['saved_action_object'].name,
'Id': res['saved_action_object'].id
})
parsed = parse_deploy_action_raw_resp(handler, res)
ec['Tanium.Actions(val.id && val.id == obj.id)'] += filter_list([parsed['action_object']],
['name', 'id', 'status', 'start_time',
'approver', 'creation_time', 'package_spec'])
contents.append(parsed)
tbl.append({
'Action ID': parsed['action_object']['id'],
'Saved Action ID': parsed['saved_action_object']['id'],
'Name': parsed['action_object']['name'],
'Package Name': parsed['package_object']['name'],
'Command': parsed['package_object']['command']
})
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Tanium Deployed Actions', tbl) if tbl else 'No results were found',
'EntryContext': ec
}
|
4,516 |
def get_active_chpi(raw):
"""Determine how many HPI coils were active for a time point.
Parameters
----------
raw : instance of Raw
Raw data
.. versionadded:: 1.2
Returns
-------
times : array, shape (n_times)
The number of active cHPIs for every timepoint in raw.
"""
# get meg system
system, _ = _get_meg_system(raw.info)
# processing neuromag files
if system in ['122m', '306m']:
# extract hpi info
chpi_info = get_chpi_info(raw.info)
# extract hpi time series and infer which one was on
chpi_ts = raw[chpi_info[1]][0].astype(int)
chpi_active = (chpi_ts & chpi_info[2][:, np.newaxis]).astype(bool)
times = chpi_active.sum(axis=0)
# all other systems
else:
raise NotImplementedError(('Identifying active HPI channels'
' is not implemented for other systems'
' than neuromag.'))
return times
|
def get_active_chpi(raw):
"""Determine how many HPI coils were active for a time point.
Parameters
----------
raw : instance of Raw
Raw data
.. versionadded:: 1.2
Returns
-------
n_active : array, shape (n_times,)
The number of active cHPIs for every timepoint in raw.
"""
# get meg system
system, _ = _get_meg_system(raw.info)
# processing neuromag files
if system in ['122m', '306m']:
# extract hpi info
chpi_info = get_chpi_info(raw.info)
# extract hpi time series and infer which one was on
chpi_ts = raw[chpi_info[1]][0].astype(int)
chpi_active = (chpi_ts & chpi_info[2][:, np.newaxis]).astype(bool)
times = chpi_active.sum(axis=0)
# all other systems
else:
raise NotImplementedError(('Identifying active HPI channels'
' is not implemented for other systems'
' than neuromag.'))
return times
|
29,973 |
def generate_READMEs(app_path: str):
app_path = Path(app_path)
if not app_path.exists():
raise Exception("App path provided doesn't exists ?!")
manifest = json.load(open(app_path / "manifest.json"))
upstream = manifest.get("upstream", {})
git = configparser.ConfigParser()
git.read(app_path / ".git/config")
remote = git['remote "origin"']['url']
# TODO: Handle ssh remotes
remote = re.search("(https:\/\/.*_ynh)\.git", remote)
if remote is not None:
remote = remote.group(1)
if not upstream and not (app_path / "doc" / "DISCLAIMER.md").exists():
print(
"There's no 'upstream' key in the manifest, and doc/DISCLAIMER.md doesn't exists - therefore assuming that we shall not auto-update the README.md for this app yet."
)
return
env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates"))
for lang, lang_suffix in [("en", ""), ("fr", "_fr")]:
template = env.get_template(f"README{lang_suffix}.md.j2")
if (app_path / "doc" / f"DESCRIPTION{lang_suffix}.md").exists():
description = (app_path / "doc" / f"DESCRIPTION{lang_suffix}.md").read_text()
# Fallback to english if maintainer too lazy to translate the description
elif (app_path / "doc" / "DESCRIPTION.md").exists():
description = (app_path / "doc" / "DESCRIPTION.md").read_text()
else:
description = None
if (app_path / "doc" / "screenshots").exists():
screenshots = os.listdir(os.path.join(app_path, "doc", "screenshots"))
if ".gitkeep" in screenshots:
screenshots.remove(".gitkeep")
else:
screenshots = []
if (app_path / "doc" / f"DISCLAIMER{lang_suffix}.md").exists():
disclaimer = (app_path / "doc" / f"DISCLAIMER{lang_suffix}.md").read_text()
# Fallback to english if maintainer too lazy to translate the disclaimer idk
elif (app_path / "doc" / "DISCLAIMER.md").exists():
disclaimer = (app_path / "doc" / "DISCLAIMER.md").read_text()
else:
disclaimer = None
out = template.render(
lang=lang,
upstream=upstream,
description=description,
screenshots=screenshots,
disclaimer=disclaimer,
manifest=manifest,
remote=remote
)
(app_path / f"README{lang_suffix}.md").write_text(out)
|
def generate_READMEs(app_path: str):
app_path = Path(app_path)
if not app_path.exists():
raise Exception("App path provided doesn't exists ?!")
manifest = json.load(open(app_path / "manifest.json"))
upstream = manifest.get("upstream", {})
git = configparser.ConfigParser()
git.read(app_path / ".git/config")
remote = git.get('remote "origin"', {}).get('url', "")
# TODO: Handle ssh remotes
remote = re.search("(https:\/\/.*_ynh)\.git", remote)
if remote is not None:
remote = remote.group(1)
if not upstream and not (app_path / "doc" / "DISCLAIMER.md").exists():
print(
"There's no 'upstream' key in the manifest, and doc/DISCLAIMER.md doesn't exists - therefore assuming that we shall not auto-update the README.md for this app yet."
)
return
env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates"))
for lang, lang_suffix in [("en", ""), ("fr", "_fr")]:
template = env.get_template(f"README{lang_suffix}.md.j2")
if (app_path / "doc" / f"DESCRIPTION{lang_suffix}.md").exists():
description = (app_path / "doc" / f"DESCRIPTION{lang_suffix}.md").read_text()
# Fallback to english if maintainer too lazy to translate the description
elif (app_path / "doc" / "DESCRIPTION.md").exists():
description = (app_path / "doc" / "DESCRIPTION.md").read_text()
else:
description = None
if (app_path / "doc" / "screenshots").exists():
screenshots = os.listdir(os.path.join(app_path, "doc", "screenshots"))
if ".gitkeep" in screenshots:
screenshots.remove(".gitkeep")
else:
screenshots = []
if (app_path / "doc" / f"DISCLAIMER{lang_suffix}.md").exists():
disclaimer = (app_path / "doc" / f"DISCLAIMER{lang_suffix}.md").read_text()
# Fallback to english if maintainer too lazy to translate the disclaimer idk
elif (app_path / "doc" / "DISCLAIMER.md").exists():
disclaimer = (app_path / "doc" / "DISCLAIMER.md").read_text()
else:
disclaimer = None
out = template.render(
lang=lang,
upstream=upstream,
description=description,
screenshots=screenshots,
disclaimer=disclaimer,
manifest=manifest,
remote=remote
)
(app_path / f"README{lang_suffix}.md").write_text(out)
|
44,248 |
def mutual_info(qnode, wires0, wires1, base=None):
r"""Compute the mutual information from a :class:`.QNode` returning a :func:`~.state`:
.. math::
I(A, B) = S(\rho^A) + S(\rho^B) - S(\rho^{AB})
where :math:`S` is the von Neumann entropy.
The mutual information is a measure of correlation between two subsystems.
More specifically, it quantifies the amount of information obtained about
one system by measuring the other system.
Args:
qnode (QNode): A :class:`.QNode` returning a :func:`~.state`.
wires0 (Sequence(int)): List of wires in the first subsystem.
wires1 (Sequence(int)): List of wires in the second subsystem.
base (float): Base for the logarithm. If None, the natural logarithm is used.
Returns:
func: A function with the same arguments as the QNode that returns
the mutual information from its output state.
**Example**
It is possible to obtain the mutual information of two subsystems from a
:class:`.QNode` returning a :func:`~.state`.
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
>>> mutual_info_circuit = qinfo.mutual_info(circuit, wires0=[0], wires1=[1])
>>> mutual_info_circuit(np.pi/2)
1.3862943611198906
>>> x = np.array(0.4, requires_grad=True)
>>> mutual_info_circuit(x)
0.3325090393262875
>>> qml.grad(mutual_info_circuit)(np.array(0.4, requires_grad=True))
1.2430067731198946
.. seealso:: :func:`~.qinfo.vn_entropy`, `pennylane.math.mutual_info` and :func:`pennylane.mutual_info`
"""
density_matrix_qnode = qml.qinfo.reduced_dm(qnode, qnode.device.wires)
def wrapper(*args, **kwargs):
density_matrix = density_matrix_qnode(*args, **kwargs)
entropy = qml.math.mutual_info(density_matrix, wires0, wires1, base=base)
return entropy
return wrapper
|
def mutual_info(qnode, wires0, wires1, base=None):
r"""Compute the mutual information from a :class:`.QNode` returning a :func:`~.state`:
.. math::
I(A, B) = S(\rho^A) + S(\rho^B) - S(\rho^{AB})
where :math:`S` is the von Neumann entropy.
The mutual information is a measure of correlation between two subsystems.
More specifically, it quantifies the amount of information obtained about
one system by measuring the other system.
Args:
qnode (QNode): A :class:`.QNode` returning a :func:`~.state`.
wires0 (Sequence(int)): List of wires in the first subsystem.
wires1 (Sequence(int)): List of wires in the second subsystem.
base (float): Base for the logarithm. If None, the natural logarithm is used.
Returns:
func: A function with the same arguments as the QNode that returns
the mutual information from its output state.
**Example**
It is possible to obtain the mutual information of two subsystems from a
:class:`.QNode` returning a :func:`~.state`.
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
>>> mutual_info_circuit = qinfo.mutual_info(circuit, wires0=[0], wires1=[1])
>>> mutual_info_circuit(np.pi/2)
1.3862943611198906
>>> x = np.array(0.4, requires_grad=True)
>>> mutual_info_circuit(x)
0.3325090393262875
>>> qml.grad(mutual_info_circuit)(np.array(0.4, requires_grad=True))
1.2430067731198946
.. seealso:: :func:`~.qinfo.vn_entropy`, :func:`pennylane.math.mutual_info` and :func:`pennylane.mutual_info`
"""
density_matrix_qnode = qml.qinfo.reduced_dm(qnode, qnode.device.wires)
def wrapper(*args, **kwargs):
density_matrix = density_matrix_qnode(*args, **kwargs)
entropy = qml.math.mutual_info(density_matrix, wires0, wires1, base=base)
return entropy
return wrapper
|
23,635 |
def noct(poa_global, temp_air, wind_speed, noct, eta_m_ref,
effective_irradiance=None, transmittance_absorbtance=0.9,
array_height=1, mount_standoff=3.5):
'''
Cell temperature model from the System Advisor Model (SAM).
The model is described in [1], Section 10.6.
Parameters
----------
poa_global : numeric
Total incident irradiance. [W/m^2]
temp_air : numeric
Ambient dry bulb temperature. [C]
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind loss
factor was determined. The default value 1.0 m/s is the wind
speed at module height used to determine NOCT. [m/s]
noct : numeric
Nominal operating cell temperature [C], determined at conditions of
800 W/m^2 irradiance, 20 C ambient air temperature and 1 m/s wind.
effective_irradiance : numeric, default None.
The irradiance that is converted to photocurrent. If None,
assumed equal to poa_global. [W/m^2]
eta_m_ref : numeric
Module external efficiency at reference conditions of 1000 W/m^2 and
20C. Calculate as P_mp (V_mp x I_mp) divided by 1000 W/m^2. [unitless]
transmittance_absorptance : numeric, default 0.9
Coefficient for combined transmittance and absorptance effects.
[unitless]
array_height : int, default 1
Height of array above ground in stories (one story is about 3m). Must
be either 1 or 2. For systems elevated less than one story, use 1.
If system is elevated more than two stories, use 2.
mount_standoff : numeric, default 3.5
Distance between array mounting and mounting surface. Use default
if system is ground-mounted. [inches]
Returns
-------
cell_temperature : numeric
Cell temperature. [C]
Raises
------
ValueError
If array_height is an invalid value (must be 1 or 2).
References
----------
.. [1] Gilman, P., Dobos, A., DiOrio, N., Freeman, J., Janzou, S.,
Ryberg, D., 2018, "SAM Photovoltaic Model Technical Reference
Update", National Renewable Energy Laboratory Report
NREL/TP-6A20-67399.
'''
if effective_irradiance is None:
irr_ratio = 1.
else:
irr_ratio = effective_irradiance / poa_global
if array_height == 1:
wind_adj = 0.51 * wind_speed
elif array_height == 2:
wind_adj = 0.61 * wind_speed
else:
raise ValueError(
f'array_height must be 1 or 2, {array_height} was given')
noct_adj = noct + _adj_noct(mount_standoff)
tau_alpha = transmittance_absorbtance * irr_ratio
cell_temp_init = ross(poa_global, temp_air, noct_adj)
heat_loss = 1 - eta_m_ref / tau_alpha
wind_loss = 9.5 / (5.7 + 3.8 * wind_adj)
return cell_temp_init * heat_loss * wind_loss
|
def noct(poa_global, temp_air, wind_speed, noct, eta_m_ref,
effective_irradiance=None, transmittance_absorbtance=0.9,
array_height=1, mount_standoff=3.5):
'''
Cell temperature model from the System Advisor Model (SAM).
The model is described in [1]_, Section 10.6.
Parameters
----------
poa_global : numeric
Total incident irradiance. [W/m^2]
temp_air : numeric
Ambient dry bulb temperature. [C]
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind loss
factor was determined. The default value 1.0 m/s is the wind
speed at module height used to determine NOCT. [m/s]
noct : numeric
Nominal operating cell temperature [C], determined at conditions of
800 W/m^2 irradiance, 20 C ambient air temperature and 1 m/s wind.
effective_irradiance : numeric, default None.
The irradiance that is converted to photocurrent. If None,
assumed equal to poa_global. [W/m^2]
eta_m_ref : numeric
Module external efficiency at reference conditions of 1000 W/m^2 and
20C. Calculate as P_mp (V_mp x I_mp) divided by 1000 W/m^2. [unitless]
transmittance_absorptance : numeric, default 0.9
Coefficient for combined transmittance and absorptance effects.
[unitless]
array_height : int, default 1
Height of array above ground in stories (one story is about 3m). Must
be either 1 or 2. For systems elevated less than one story, use 1.
If system is elevated more than two stories, use 2.
mount_standoff : numeric, default 3.5
Distance between array mounting and mounting surface. Use default
if system is ground-mounted. [inches]
Returns
-------
cell_temperature : numeric
Cell temperature. [C]
Raises
------
ValueError
If array_height is an invalid value (must be 1 or 2).
References
----------
.. [1] Gilman, P., Dobos, A., DiOrio, N., Freeman, J., Janzou, S.,
Ryberg, D., 2018, "SAM Photovoltaic Model Technical Reference
Update", National Renewable Energy Laboratory Report
NREL/TP-6A20-67399.
'''
if effective_irradiance is None:
irr_ratio = 1.
else:
irr_ratio = effective_irradiance / poa_global
if array_height == 1:
wind_adj = 0.51 * wind_speed
elif array_height == 2:
wind_adj = 0.61 * wind_speed
else:
raise ValueError(
f'array_height must be 1 or 2, {array_height} was given')
noct_adj = noct + _adj_noct(mount_standoff)
tau_alpha = transmittance_absorbtance * irr_ratio
cell_temp_init = ross(poa_global, temp_air, noct_adj)
heat_loss = 1 - eta_m_ref / tau_alpha
wind_loss = 9.5 / (5.7 + 3.8 * wind_adj)
return cell_temp_init * heat_loss * wind_loss
|
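A hedged usage sketch for the noct cell-temperature model above. The helpers _adj_noct and ross that it calls are not shown in this record and are assumed to be available in the same module; the input values are illustrative only.

t_cell = noct(poa_global=1000., temp_air=25., wind_speed=1., noct=45.,
              eta_m_ref=0.2, array_height=1, mount_standoff=3.5)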
32,292 |
def check_url_filtering_profiles(
topology: Topology,
device_filter_string: str = None
) -> ConfigurationHygieneCheckResult:
"""
Checks the configured URL Filtering profiles to ensure at least one meets best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.check_url_filtering_profiles(
topology,
device_filter_str=device_filter_string,
)
|
def check_url_filtering_profiles(
topology: Topology,
device_filter_string: Optional[str] = None
) -> ConfigurationHygieneCheckResult:
"""
Checks the configured URL Filtering profiles to ensure at least one meets best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.check_url_filtering_profiles(
topology,
device_filter_str=device_filter_string,
)
|
35,876 |
def wallet_createwatchonly(wallet_root_path, master_pub_key, is_fidelity_bond_wallet = False):
wallet_name = cli_get_wallet_file_name(defaultname="watchonly.jmdat")
if not wallet_name:
DEFAULT_WATCHONLY_WALLET_NAME = "watchonly.jmdat"
wallet_name = DEFAULT_WATCHONLY_WALLET_NAME
wallet_path = os.path.join(wallet_root_path, wallet_name)
password = cli_get_wallet_passphrase_check()
if not password:
return ""
if is_fidelity_bond_wallet:
entropy = FidelityBondMixin.get_xpub_from_fidelity_bond_master_pub_key(master_pub_key)
if not entropy:
jmprint("Error with provided master pub key", "error")
return ""
else:
entropy = master_pub_key
entropy = entropy.encode()
if is_fidelity_bond_wallet:
create_wallet(wallet_path, password,
max_mixdepth=FidelityBondMixin.FIDELITY_BOND_MIXDEPTH,
wallet_cls=FidelityBondWatchonlyWallet, entropy=entropy)
else:
create_wallet(wallet_path, password,
max_mixdepth=WatchonlyMixin.WATCH_ONLY_MIXDEPTH,
wallet_cls=SegwitWatchonlyWallet, entropy=entropy)
return "Done"
|
def wallet_createwatchonly(wallet_root_path, master_pub_key, is_fidelity_bond_wallet = False):
wallet_name = cli_get_wallet_file_name(defaultname="watchonly.jmdat")
if not wallet_name:
DEFAULT_WATCHONLY_WALLET_NAME = "watchonly.jmdat"
wallet_name = DEFAULT_WATCHONLY_WALLET_NAME
wallet_path = os.path.join(wallet_root_path, wallet_name)
password = cli_get_wallet_passphrase_check()
if not password:
return ""
if is_fidelity_bond_wallet:
entropy = FidelityBondMixin.get_xpub_from_fidelity_bond_master_pub_key(master_pub_key)
if not entropy:
jmprint("Error with provided master public key", "error")
return ""
else:
entropy = master_pub_key
entropy = entropy.encode()
if is_fidelity_bond_wallet:
create_wallet(wallet_path, password,
max_mixdepth=FidelityBondMixin.FIDELITY_BOND_MIXDEPTH,
wallet_cls=FidelityBondWatchonlyWallet, entropy=entropy)
else:
create_wallet(wallet_path, password,
max_mixdepth=WatchonlyMixin.WATCH_ONLY_MIXDEPTH,
wallet_cls=SegwitWatchonlyWallet, entropy=entropy)
return "Done"
|
13,560 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i += 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
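A hedged usage sketch for eigs above, assuming this is pyMOR code: the NumpyMatrixOperator import path and the availability of the arnoldi/QR_iteration helpers in the same module are assumptions.

import numpy as np
from pymor.operators.numpy import NumpyMatrixOperator  # assumed import path

A = NumpyMatrixOperator(np.diag(np.arange(1., 51.)))  # eigenvalues 1..50
w, v = eigs(A, k=3, which='LM')
print(w)       # approximately [50., 49., 48.]
print(len(v))  # 3 eigenvectors returned as a VectorArray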
2,247 |
def _num_features(X):
"""Return the number of features in an array-like X.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
message = "Unable to find the number of features from X"
if not hasattr(X, '__len__') and not hasattr(X, 'shape'):
if not hasattr(X, '__array__'):
raise TypeError(message)
X = np.asarray(X)
if hasattr(X, 'shape') and X.shape is not None:
if len(X.shape) <= 1:
raise TypeError(message)
if isinstance(X.shape[1], numbers.Integral):
return X.shape[1]
first_elem = X[0]
# Do not consider an array-like of strings to be a 2D array
if isinstance(first_elem, (str, bytes)):
raise TypeError(message)
try:
return len(first_elem)
except Exception as err:
raise TypeError(message) from err
|
def _num_features(X):
"""Return the number of features in an array-like X.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
message = f"Unable to find the number of features from X of type {type(X)}"
if not hasattr(X, '__len__') and not hasattr(X, 'shape'):
if not hasattr(X, '__array__'):
raise TypeError(message)
X = np.asarray(X)
if hasattr(X, 'shape') and X.shape is not None:
if len(X.shape) <= 1:
raise TypeError(message)
if isinstance(X.shape[1], numbers.Integral):
return X.shape[1]
first_elem = X[0]
# Do not consider an array-like of strings to be a 2D array
if isinstance(first_elem, (str, bytes)):
raise TypeError(message)
try:
return len(first_elem)
except Exception as err:
raise TypeError(message) from err
|
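A few calls illustrating _num_features above; the outputs follow directly from the code.

import numpy as np

_num_features(np.zeros((10, 4)))        # 4
_num_features([[1, 2, 3], [4, 5, 6]])   # 3, the length of the first sample

try:
    _num_features(np.zeros(5))          # 1D input has no feature axis
except TypeError as err:
    print(err)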
48,579 |
def copy_vtk_array(array, deep=True):
"""Create a deep or shallow copy a of vtk array.
Parametres
----------
array : vtk.vtkDataArray or vtk.vtkAbstractArray
VTK array.
deep : bool, optional
When ``True``, create a deep copy of the array. When ``False``, returns
a shallow copy.
Returns
-------
vtk.vtkDataArray or vtk.vtkAbstractArray
Copy of the original VTK array.
Examples
--------
Perform a deep copy of a vtk array.
>>> import vtk
>>> import pyvista
>>> arr = vtk.vtkFloatArray()
>>> _ = arr.SetNumberOfValues(10)
>>> arr.SetValue(0, 1)
>>> arr_copy = pyvista.utilities.misc.copy_vtk_array(arr)
>>> arr_copy.GetValue(0)
1.0
"""
if not isinstance(array, (_vtk.vtkDataArray, _vtk.vtkAbstractArray)):
raise TypeError(f"Invalid type {type(array)}.")
vtk_type = str(type(array)).split('.')[-1][:-2]
if vtk_type not in dir(_vtk): # pragma: no cover
raise RuntimeError(f'Type {vtk_type} not supported.')
new_array = getattr(_vtk, vtk_type)()
if deep:
new_array.DeepCopy(array)
else:
new_array.ShallowCopy(array)
return new_array
|
def copy_vtk_array(array, deep=True):
"""Create a deep or shallow copy of a VTK array.
Parameters
----------
array : vtk.vtkDataArray or vtk.vtkAbstractArray
VTK array.
deep : bool, optional
When ``True``, create a deep copy of the array. When ``False``, returns
a shallow copy.
Returns
-------
vtk.vtkDataArray or vtk.vtkAbstractArray
Copy of the original VTK array.
Examples
--------
Perform a deep copy of a vtk array.
>>> import vtk
>>> import pyvista
>>> arr = vtk.vtkFloatArray()
>>> _ = arr.SetNumberOfValues(10)
>>> arr.SetValue(0, 1)
>>> arr_copy = pyvista.utilities.misc.copy_vtk_array(arr)
>>> arr_copy.GetValue(0)
1.0
"""
if not isinstance(array, (_vtk.vtkDataArray, _vtk.vtkAbstractArray)):
raise TypeError(f"Invalid type {type(array)}.")
vtk_type = str(type(array)).split('.')[-1][:-2]
if vtk_type not in dir(_vtk): # pragma: no cover
raise RuntimeError(f'Type {vtk_type} not supported.')
new_array = getattr(_vtk, vtk_type)()
if deep:
new_array.DeepCopy(array)
else:
new_array.ShallowCopy(array)
return new_array
|
30,417 |
def merge_script_package_to_yml(package_path, dir_name, dest_path=""):
"""Merge the various components to create an output yml file
Args:
package_path (str): Directory containing the various files
dir_name (str): Parent directory containing package (Scripts/Integrations)
dest_path (str, optional): Defaults to "". Destination output
Returns:
output path, script path, image path
"""
print("Merging package: {}".format(package_path))
output_filename = '{}-{}.yml'.format(DIR_TO_PREFIX[dir_name], os.path.basename(os.path.dirname(package_path)))
if dest_path:
output_path = os.path.join(dest_path, output_filename)
else:
output_path = os.path.join(dir_name, output_filename)
yml_paths = glob.glob(package_path + '*.yml')
yml_path = yml_paths[0]
for path in yml_paths:
# The plugin creates a unified YML file for the package.
# In case this script runs locally and there is a unified YML file in the package we need to ignore it.
# Also,
# we don't take the unified file by default because there might be packages that were not created by the plugin.
if 'unified' not in path:
yml_path = path
break
with open(yml_path, 'r') as yml_file:
yml_data = yaml.safe_load(yml_file)
script_obj = yml_data
if dir_name != 'Scripts':
script_obj = yml_data['script']
script_type = TYPE_TO_EXTENSION[script_obj['type']]
with io.open(yml_path, mode='r', encoding='utf-8') as yml_file:
yml_text = yml_file.read()
yml_text, script_path = insert_script_to_yml(package_path, script_type, yml_text, dir_name, yml_data)
image_path = None
desc_path = None
if dir_name == 'Integrations' or dir_name == 'Beta_Integrations':
yml_text, image_path = insert_image_to_yml(dir_name, package_path, yml_data, yml_text)
yml_text, desc_path = insert_description_to_yml(dir_name, package_path, yml_data, yml_text)
output_map = {output_path: yml_text}
if 'dockerimage45' in script_obj:
# we need to split into two files 45 and 50. Current one will be from version 5.0
yml_text = re.sub(r'^\s*dockerimage45:.*\n?', '', yml_text, flags=re.MULTILINE) # remove the dockerimage45 line
yml_text45 = yml_text
if 'fromversion' in yml_data:
yml_text = re.sub(r'^fromversion:.*$', 'fromversion: 5.0.0', yml_text, flags=re.MULTILINE)
else:
yml_text = 'fromversion: 5.0.0\n' + yml_text
if 'toversion' in yml_data:
yml_text45 = re.sub(r'^toversion:.*$', 'toversion: 4.5.9', yml_text, flags=re.MULTILINE)
else:
yml_text45 = 'toversion: 4.5.9\n' + yml_text45
yml_text45 = re.sub(r'(^\s*dockerimage:).*$', r'\1 ' + script_obj.get('dockerimage45'), yml_text45, flags=re.MULTILINE)
output_path45 = re.sub(r'\.yml$', '_45.yml', output_path)
output_map = {
output_path: yml_text,
output_path45: yml_text45
}
for k, v in output_map.items():
if IS_CI and os.path.isfile(k):
raise ValueError('Output file already exists: {}.'
' Make sure to remove this file from source control'
' or rename this package (for example if it is a v2).'.format(output_path))
with io.open(k, mode='w', encoding='utf-8') as f:
f.write(v)
return list(output_map.keys()), yml_path, script_path, image_path, desc_path
|
def merge_script_package_to_yml(package_path, dir_name, dest_path=""):
"""Merge the various components to create an output yml file
Args:
package_path (str): Directory containing the various files
dir_name (str): Parent directory containing package (Scripts/Integrations)
dest_path (str, optional): Defaults to "". Destination output
Returns:
output path, script path, image path
"""
print("Merging package: {}".format(package_path))
output_filename = '{}-{}.yml'.format(DIR_TO_PREFIX[dir_name], os.path.basename(os.path.dirname(package_path)))
if dest_path:
output_path = os.path.join(dest_path, output_filename)
else:
output_path = os.path.join(dir_name, output_filename)
yml_paths = glob.glob(package_path + '*.yml')
yml_path = yml_paths[0]
for path in yml_paths:
# The plugin creates a unified YML file for the package.
# In case this script runs locally and there is a unified YML file in the package we need to ignore it.
# Also,
# we don't take the unified file by default because there might be packages that were not created by the plugin.
if 'unified' not in path:
yml_path = path
break
with open(yml_path, 'r') as yml_file:
yml_data = yaml.safe_load(yml_file)
script_obj = yml_data
if dir_name != 'Scripts':
script_obj = yml_data['script']
script_type = TYPE_TO_EXTENSION[script_obj['type']]
with io.open(yml_path, mode='r', encoding='utf-8') as yml_file:
yml_text = yml_file.read()
yml_text, script_path = insert_script_to_yml(package_path, script_type, yml_text, dir_name, yml_data)
image_path = None
desc_path = None
if dir_name == 'Integrations' or dir_name == 'Beta_Integrations':
yml_text, image_path = insert_image_to_yml(dir_name, package_path, yml_data, yml_text)
yml_text, desc_path = insert_description_to_yml(dir_name, package_path, yml_data, yml_text)
output_map = {output_path: yml_text}
if 'dockerimage45' in script_obj:
# we need to split into two files 45 and 50. Current one will be from version 5.0
yml_text = re.sub(r'^\s*dockerimage45:.*\n?', '', yml_text, flags=re.MULTILINE) # remove the dockerimage45 line
yml_text45 = yml_text
if 'fromversion' in yml_data:
yml_text = re.sub(r'^fromversion:.*$', 'fromversion: 5.0.0', yml_text, flags=re.MULTILINE)
else:
yml_text = 'fromversion: 5.0.0\n' + yml_text
if 'toversion' in yml_data:
yml_text45 = re.sub(r'^toversion:.*$', 'toversion: 4.5.9', yml_text45, flags=re.MULTILINE)
else:
yml_text45 = 'toversion: 4.5.9\n' + yml_text45
yml_text45 = re.sub(r'(^\s*dockerimage:).*$', r'\1 ' + script_obj.get('dockerimage45'), yml_text45, flags=re.MULTILINE)
output_path45 = re.sub(r'\.yml$', '_45.yml', output_path)
output_map = {
output_path: yml_text,
output_path45: yml_text45
}
for k, v in output_map.items():
if IS_CI and os.path.isfile(k):
raise ValueError('Output file already exists: {}.'
' Make sure to remove this file from source control'
' or rename this package (for example if it is a v2).'.format(output_path))
with io.open(k, mode='w', encoding='utf-8') as f:
f.write(v)
return list(output_map.keys()), yml_path, script_path, image_path, desc_path
|
9,903 |
def create_module_params(module):
"""
Reads the module parameters and returns a dict
:return: dict
"""
instance_parameters = dict(
# ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
ReplicationSubnetGroupDescription=module.params.get('description'),
SubnetIds=module.params.get('subnetids'),
)
return instance_parameters
|
def create_module_params(module):
"""
Reads the module parameters and returns a dict
:return: dict
"""
instance_parameters = dict(
# ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
ReplicationSubnetGroupDescription=module.params.get('description'),
SubnetIds=module.params.get('subnet_ids'),
)
return instance_parameters
|
32,313 |
def create_nic_parameters(args, subscription_id):
"""
Construct the NIC object
Use the actual parameters passed to the 'azure-vm-create-nic' command
to build a nic object that will be sent in the body of the command's associated
API call.
parameter: (dict) args
Dictionary that contains the actual parameters that were passed to the
'azure-vm-create-nic' command
returns:
NIC Object
"""
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
location = args.get('nic_location')
address_assignment_method = args.get('address_assignment_method')
private_ip_address = args.get('private_ip_address')
network_security_group = args.get('network_security_group')
vnet_name = args.get('vnet_name')
subnet_name = args.get('subnet_name')
ip_config_name = args.get('ip_config_name')
subnet_id = f"/subscriptions/{subscription_id}/resourceGroups/"
subnet_id += f"{resource_group}/providers/Microsoft.Network/virtualNetworks/" \
f"{vnet_name}/subnets/{subnet_name}"
# Construct NIC object
nic = {
'location': location,
'properties': {
'ipConfigurations': [
{
'name': ip_config_name,
'properties': {
'privateIPAllocationMethod': address_assignment_method,
'subnet': {
'id': subnet_id
}
}
}
]
}
}
if address_assignment_method == "Static":
if not private_ip_address:
err_msg = 'You have chosen to assign a "Static" IP address value to the interface, ' \
'so you must enter a value for the "private_ip_address" argument.'
raise Exception(err_msg)
nic['properties']['ipConfigurations'][0]['properties']['privateIPAddress'] = private_ip_address
if network_security_group:
network_security_group_id = f"/subscriptions/{subscription_id}/resourceGroups/"
network_security_group_id += f"{resource_group}/providers/Microsoft.Network/networkSecurityGroups/" \
f"{network_security_group}"
nic['properties']['networkSecurityGroup']['id'] = private_ip_address
return nic
|
def create_nic_parameters(args, subscription_id):
"""
Construct the NIC object
Use the actual parameters passed to the 'azure-vm-create-nic' command
to build a nic object that will be sent in the body of the command's associated
API call.
parameter: (dict) args
Dictionary that contains the actual parameters that were passed to the
'azure-vm-create-nic' command
returns:
NIC Object
"""
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
location = args.get('nic_location')
address_assignment_method = args.get('address_assignment_method')
private_ip_address = args.get('private_ip_address')
network_security_group = args.get('network_security_group')
vnet_name = args.get('vnet_name')
subnet_name = args.get('subnet_name')
ip_config_name = args.get('ip_config_name')
subnet_id = f"/subscriptions/{subscription_id}/resourceGroups/"
subnet_id += f"{resource_group}/providers/Microsoft.Network/virtualNetworks/" \
f"{vnet_name}/subnets/{subnet_name}"
# Construct NIC object
nic = {
'location': location,
'properties': {
'ipConfigurations': [
{
'name': ip_config_name,
'properties': {
'privateIPAllocationMethod': address_assignment_method,
'subnet': {
'id': subnet_id
}
}
}
]
}
}
if address_assignment_method == "Static":
if not private_ip_address:
err_msg = 'You have chosen to assign a "Static" IP address value to the interface, ' \
'so you must enter a value for the "private_ip_address" argument.'
raise Exception(err_msg)
nic['properties']['ipConfigurations'][0]['properties']['privateIPAddress'] = private_ip_address
if network_security_group:
network_security_group_id = f"/subscriptions/{subscription_id}/resourceGroups/"
network_security_group_id += f"{resource_group}/providers/Microsoft.Network/networkSecurityGroups/" \
f"{network_security_group}"
nic['properties']['networkSecurityGroup']['id'] = network_security_group_id
return nic
|
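A small usage sketch for create_nic_parameters above; all argument values are illustrative placeholders.

args = {
    'resource_group': 'my-rg',
    'nic_location': 'westeurope',
    'address_assignment_method': 'Dynamic',
    'vnet_name': 'my-vnet',
    'subnet_name': 'default',
    'ip_config_name': 'ipconfig1',
}
nic = create_nic_parameters(args, subscription_id='00000000-0000-0000-0000-000000000000')
print(nic['properties']['ipConfigurations'][0]['properties']['subnet']['id'])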
23,759 |
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
""" copies built artifacts, libs, headers, data, etc. from build_folder to
package folder
"""
mkdir(package_folder)
output = conanfile.output
# Make the copy of all the patterns
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
folders = [source_folder, build_folder] if source_folder != build_folder else [build_folder]
conanfile.copy = FileCopier(folders, package_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with chdir(build_folder):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
manifest = _create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
return manifest.summary_hash
|
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
""" copies built artifacts, libs, headers, data, etc. from build_folder to
package folder
"""
mkdir(package_folder)
output = conanfile.output
# Make the copy of all the patterns
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
folders = [source_folder, build_folder] if source_folder != build_folder else [build_folder]
conanfile.copy = FileCopier(folders, package_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with chdir(build_folder):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
manifest = _create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
    return manifest.summary_hash
|
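The try/except around conanfile.package() above follows a common pattern: if packaging fails, the partially created package folder is removed before the error is re-raised. A minimal, Conan-free sketch of that pattern (all names here are illustrative, not Conan API):

import os
import shutil
import tempfile

def build_into(folder, builder):
    """Run builder(folder); on failure remove the partial output and re-raise."""
    os.makedirs(folder, exist_ok=True)
    try:
        builder(folder)
    except Exception:
        shutil.rmtree(folder, ignore_errors=True)  # never leave a half-built folder behind
        raise

# Example: the builder just drops a single artifact file into the folder.
workdir = os.path.join(tempfile.mkdtemp(), "pkg")
build_into(workdir, lambda d: open(os.path.join(d, "artifact.txt"), "w").close())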
41,047 |
def andb(arrs):
"""
Sums arrays in `arrs`
Parameters
----------
arrs : :obj:`list`
List of boolean or integer arrays to be summed
Returns
-------
result : :obj:`numpy.ndarray`
Integer array of summed `arrs`
"""
# coerce to integer and ensure all arrays are the same shape
arrs = [check_array(arr, dtype=int, ensure_2d=False, allow_nd=True) for arr in arrs]
if not np.all([arr1.shape == arr2.shape for arr1 in arrs for arr2 in arrs]):
raise ValueError('All input arrays must have same shape.')
# sum across arrays
result = np.sum(arrs, axis=0)
return result
|
def andb(arrs):
"""
    Sums arrays in `arrs`
Parameters
----------
arrs : :obj:`list`
List of boolean or integer arrays to be summed
Returns
-------
result : :obj:`numpy.ndarray`
Integer array of summed `arrs`
"""
# coerce to integer and ensure all arrays are the same shape
arrs = [check_array(arr, dtype=int, ensure_2d=False, allow_nd=True) for arr in arrs]
if not np.all([arr1.shape == arr2.shape for arr1 in arrs for arr2 in arrs]):
raise ValueError('All input arrays must have same shape.')
# sum across arrays
result = np.sum(arrs, axis=0)
return result
|
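For reference, the element-wise sum that andb computes can be reproduced with plain NumPy (shown below without scikit-learn's check_array validation):

import numpy as np

masks = [np.array([True, False, True]),
         np.array([True, True, False]),
         np.array([0, 1, 1])]
# Coerce each array to int and sum element-wise, mirroring andb's result.
summed = np.sum([np.asarray(m, dtype=int) for m in masks], axis=0)
print(summed)  # [2 2 2]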
21,751 |
def _event_type_from_format_version(
format_version: int,
) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
"""Returns the python type to use to construct an Event object for the
given event format version.
Args:
format_version: The event format version
Returns:
type: A type that can be initialized as per the initializer of
`FrozenEvent`
"""
if format_version == EventFormatVersions.V1:
return FrozenEvent
elif format_version == EventFormatVersions.V2:
return FrozenEventV2
elif format_version == EventFormatVersions.V3:
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))
|
def _event_type_from_format_version(
format_version: int,
) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
"""Returns the python type to use to construct an Event object for the
given event format version.
Args:
format_version: The event format version
Returns:
type: A type that can be initialized as per the initializer of
`FrozenEvent`
"""
if format_version == EventFormatVersions.V1:
return FrozenEvent
elif format_version == EventFormatVersions.V2:
return FrozenEventV2
elif format_version == EventFormatVersions.V3:
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))
|
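The if/elif ladder above is a version-to-class dispatch. An equivalent mapping-based sketch is shown below; the stand-in classes and integer keys are illustrative only, since the real FrozenEvent classes and EventFormatVersions constants are Synapse internals.

from typing import Type

class FrozenEvent: ...
class FrozenEventV2: ...
class FrozenEventV3: ...

_EVENT_TYPES = {1: FrozenEvent, 2: FrozenEventV2, 3: FrozenEventV3}

def event_type_for(format_version: int) -> Type:
    try:
        return _EVENT_TYPES[format_version]
    except KeyError:
        raise Exception("No event format %r" % (format_version,))

assert event_type_for(3) is FrozenEventV3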
56,881 |
def delete_markdown(text, *, ignore_links=True):
r"""A helper function that deletes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to delete markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters deleted.
"""
url_regex = r'(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])'
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get('url')
if is_url:
return is_url
return ''
regex = r'(?P<markdown>[_\\~|\*`]|%s)' % _MARKDOWN_ESCAPE_COMMON
if ignore_links:
regex = '(?:%s|%s)' % (url_regex, regex)
return re.sub(regex, replacement, text, 0, re.MULTILINE)
|
def delete_markdown(text, *, ignore_links=True):
    r"""A helper function that removes markdown characters.
Parameters
-----------
text: :class:`str`
The text to delete markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters deleted.
"""
url_regex = r'(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])'
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get('url')
if is_url:
return is_url
return ''
regex = r'(?P<markdown>[_\\~|\*`]|%s)' % _MARKDOWN_ESCAPE_COMMON
if ignore_links:
regex = '(?:%s|%s)' % (url_regex, regex)
return re.sub(regex, replacement, text, 0, re.MULTILINE)
|
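A usage sketch for delete_markdown: the module-level _MARKDOWN_ESCAPE_COMMON pattern is not shown in this snippet, so a simplified stand-in (matching block quotes at line start) is defined here purely so the example is self-contained.

import re

# Stand-in for the module's _MARKDOWN_ESCAPE_COMMON (not shown above); a pattern for
# '>' block quotes at the start of a line is enough for this illustration.
_MARKDOWN_ESCAPE_COMMON = r'^>(?:>>)?\s'

text = 'check *this* out: <https://example.com/a_b_c> and __that__'
print(delete_markdown(text))
# -> 'check this out: <https://example.com/a_b_c> and that'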
50,444 |
def get_proctored_exam_settings_url(course_module):
"""
Gets course authoring microfrontend URL for links to proctored exam settings page
"""
if settings.FEATURES.get('ENABLE_EXAM_SETTINGS_HTML_VIEW'):
return get_course_authoring_url(course_module)
else:
return ''
|
def get_proctored_exam_settings_url(course_module):
"""
Gets course authoring microfrontend URL for links to proctored exam settings page
"""
if settings.FEATURES.get('ENABLE_EXAM_SETTINGS_HTML_VIEW'):
course_authoring_microfrontend_url = get_course_authoring_url(course_module)
return course_authoring_microfrontend_url
|
23,274 |
def resolve_types(cls, globalns=None, localns=None, attribs=None):
"""
Resolve any strings and forward annotations in type annotations.
This is only required if you need concrete types in `Attribute`'s *type*
field. In other words, you don't need to resolve your types if you only
use them for static type checking.
With no arguments, names will be looked up in the module in which the class
was created. If this is not what you want, e.g. if the name only exists
inside a method, you may pass *globalns* or *localns* to specify other
dictionaries in which to look up these names. See the docs of
`typing.get_type_hints` for more details.
:param type cls: Class to resolve.
:param Optional[dict] globalns: Dictionary containing global variables.
:param Optional[dict] localns: Dictionary containing local variables.
:param Optional[list] attribs: List of attribs for the given class.
This is necessary when calling from inside a ``field_transformer``
since *cls* is not an ``attrs`` class yet.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class and you didn't pass any attribs.
:raise NameError: If types cannot be resolved because of missing variables.
:returns: *cls* so you can use this function also as a class decorator.
Please note that you have to apply it **after** `attr.s`. That means
the decorator has to come in the line **before** `attr.s`.
.. versionadded:: 20.1.0
.. versionadded:: 20.4.0 *attribs*
"""
try:
# Since calling get_type_hints is expensive we cache whether we've
# done it already.
cls.__attrs_types_resolved__
except AttributeError:
import typing
hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
for field in fields(cls) if attribs is None else attribs:
if field.name in hints:
# Since fields have been frozen we must work around it.
_obj_setattr(field, "type", hints[field.name])
cls.__attrs_types_resolved__ = True
# Return the class so you can use it as a decorator too.
return cls
|
def resolve_types(cls, globalns=None, localns=None, attribs=None):
"""
Resolve any strings and forward annotations in type annotations.
This is only required if you need concrete types in `Attribute`'s *type*
field. In other words, you don't need to resolve your types if you only
use them for static type checking.
With no arguments, names will be looked up in the module in which the class
was created. If this is not what you want, e.g. if the name only exists
inside a method, you may pass *globalns* or *localns* to specify other
dictionaries in which to look up these names. See the docs of
`typing.get_type_hints` for more details.
:param type cls: Class to resolve.
:param Optional[dict] globalns: Dictionary containing global variables.
:param Optional[dict] localns: Dictionary containing local variables.
:param Optional[list] attribs: List of attribs for the given class.
This is necessary when calling from inside a ``field_transformer``
since *cls* is not an ``attrs`` class yet.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class and you didn't pass any attribs.
:raise NameError: If types cannot be resolved because of missing variables.
:returns: *cls* so you can use this function also as a class decorator.
Please note that you have to apply it **after** `attr.s`. That means
the decorator has to come in the line **before** `attr.s`.
.. versionadded:: 20.1.0
.. versionadded:: 21.1.0 *attribs*
"""
try:
# Since calling get_type_hints is expensive we cache whether we've
# done it already.
cls.__attrs_types_resolved__
except AttributeError:
import typing
hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
for field in fields(cls) if attribs is None else attribs:
if field.name in hints:
# Since fields have been frozen we must work around it.
_obj_setattr(field, "type", hints[field.name])
cls.__attrs_types_resolved__ = True
# Return the class so you can use it as a decorator too.
return cls
|
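A small usage sketch with the public attrs API (assuming attrs >= 20.1, where resolve_types is available): a self-referential forward reference is resolved to the actual class.

import attr

@attr.s(auto_attribs=True)
class Node:
    value: int
    next: "Node" = None  # forward reference stored as a string until resolved

attr.resolve_types(Node)
assert attr.fields(Node).next.type is Node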
14,887 |
def setup(hass, config):
"""Set up the QVR Pro component."""
from pyqvrpro import Client
from pyqvrpro.client import AuthenticationError
user = config[DOMAIN][CONF_USERNAME]
password = config[DOMAIN][CONF_PASSWORD]
host = config[DOMAIN][CONF_HOST]
excluded_channels = config[DOMAIN][CONF_EXCLUDE_CHANNELS]
try:
qvrpro = Client(user, password, host)
except AuthenticationError:
_LOGGER.error("QVR Pro authentication failed. Please check your credentials.")
return False
channel_resp = qvrpro.get_channel_list()
if "message" in channel_resp.keys():
if channel_resp["message"] == "Insufficient permission.":
_LOGGER.error("QVR Pro user must have Surveillance Management permission.")
return False
channels = []
for channel in channel_resp["channels"]:
if channel["channel_index"] + 1 in excluded_channels:
continue
channels.append(QVRChannel(**channel))
hass.data[DOMAIN] = {"channels": channels, "client": qvrpro}
load_platform(hass, "camera", DOMAIN, {}, config)
# Register services
def handle_start_record(call):
guid = call.data.get("guid")
qvrpro.start_recording(guid)
def handle_stop_record(call):
guid = call.data.get("guid")
qvrpro.stop_recording(guid)
hass.services.register(DOMAIN, "start_record", handle_start_record)
hass.services.register(DOMAIN, "stop_record", handle_stop_record)
return True
|
def setup(hass, config):
"""Set up the QVR Pro component."""
from pyqvrpro import Client
from pyqvrpro.client import AuthenticationError
user = config[DOMAIN][CONF_USERNAME]
password = config[DOMAIN][CONF_PASSWORD]
host = config[DOMAIN][CONF_HOST]
excluded_channels = config[DOMAIN][CONF_EXCLUDE_CHANNELS]
try:
qvrpro = Client(user, password, host)
except AuthenticationError:
_LOGGER.error("QVR Pro authentication failed. Please check your credentials.")
return False
channel_resp = qvrpro.get_channel_list()
if "message" in channel_resp.keys():
if channel_resp["message"] == "Insufficient permission.":
_LOGGER.error("User must have Surveillance Management permission")
return False
channels = []
for channel in channel_resp["channels"]:
if channel["channel_index"] + 1 in excluded_channels:
continue
channels.append(QVRChannel(**channel))
hass.data[DOMAIN] = {"channels": channels, "client": qvrpro}
load_platform(hass, "camera", DOMAIN, {}, config)
# Register services
def handle_start_record(call):
guid = call.data.get("guid")
qvrpro.start_recording(guid)
def handle_stop_record(call):
guid = call.data.get("guid")
qvrpro.stop_recording(guid)
hass.services.register(DOMAIN, "start_record", handle_start_record)
hass.services.register(DOMAIN, "stop_record", handle_stop_record)
return True
|
48,429 |
def diff_config(module, commands, config):
"""Diff the candidate commands against current config returning a list of
updates to be applied to remote edgeos device
:param module: ansible module for this type (edgeos)
:type module: ansible.module
:param commands: candidate commands passed through ansible
:type commands: list
:param config: [commands pulled from edgeos device]
:type config: list
:return: updates: changes to apply to remote device
:rtype: list
:return: unmanaged_config: config on device without matching candidate
commands passed to ansible
:rtype: list
:return: invalid: commands passed to ansible not starting with 'set' or
'delete' and therefore considered invalid
:rtype: list
"""
config = [to_native(check_command(module, c)) for c in config.splitlines()]
set_commands, delete_commands, invalid_commands = list(), list(), list()
updates, unmanaged_config = list(), list()
for line in commands:
line = to_native(check_command(module, line))
if line.startswith('delete '):
delete_commands.append(line)
elif line.startswith('set '):
set_commands.append(line)
else:
invalid_commands.append(line)
# Will always run the delete commands first to allow for resets
if delete_commands:
updates = delete_commands
# Removing all matching commands already in config
updates = updates + [line for line in set_commands if line not in config]
# Add back changes where a corresponding delete command exists
if delete_commands:
for line in set_commands:
search = re.sub('^set ', 'delete ', line)
for dline in delete_commands:
if search.startswith(dline):
updates.append(line)
# Unmanaged config (config without matching commands)
unmanaged_config = (list(set(config) - set(set_commands)))
matches = list()
# Remove if actually a change to config
for line in unmanaged_config:
search = line.rsplit(' ', 1)[0]
for update in updates:
if update.startswith(search):
matches.append(line)
break
unmanaged_config = [line for line in unmanaged_config if line not in matches]
return updates, unmanaged_config, invalid_commands
|
def diff_config(module, commands, config):
"""Diff the candidate commands against current config returning a list of
updates to be applied to remote edgeos device
:param module: ansible module for this type (edgeos)
:type module: ansible.module
:param commands: candidate commands passed through ansible
:type commands: list
:param config: [commands pulled from edgeos device]
:type config: list
:return: updates: changes to apply to remote device
:rtype: list
:return: unmanaged_config: config on device without matching candidate
commands passed to ansible
:rtype: list
:return: invalid: commands passed to ansible not starting with 'set' or
'delete' and therefore considered invalid
:rtype: list
"""
config = [to_native(check_command(module, c)) for c in config.splitlines()]
set_commands, delete_commands, invalid_commands = [], [], []
updates, unmanaged_config = [], []
for line in commands:
line = to_native(check_command(module, line))
if line.startswith('delete '):
delete_commands.append(line)
elif line.startswith('set '):
set_commands.append(line)
else:
invalid_commands.append(line)
# Will always run the delete commands first to allow for resets
if delete_commands:
updates = delete_commands
# Removing all matching commands already in config
updates = updates + [line for line in set_commands if line not in config]
# Add back changes where a corresponding delete command exists
if delete_commands:
for line in set_commands:
search = re.sub('^set ', 'delete ', line)
for dline in delete_commands:
if search.startswith(dline):
updates.append(line)
# Unmanaged config (config without matching commands)
unmanaged_config = (list(set(config) - set(set_commands)))
matches = list()
# Remove if actually a change to config
for line in unmanaged_config:
search = line.rsplit(' ', 1)[0]
for update in updates:
if update.startswith(search):
matches.append(line)
break
unmanaged_config = [line for line in unmanaged_config if line not in matches]
return updates, unmanaged_config, invalid_commands
|
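A dependency-free sketch of the same diff idea (the ansible plumbing of module, check_command, and to_native is omitted, and the sample commands are made up):

import re

def simple_diff(commands, config_lines):
    """Partition candidate commands and drop those already present in the config."""
    set_cmds = [c for c in commands if c.startswith('set ')]
    delete_cmds = [c for c in commands if c.startswith('delete ')]
    invalid = [c for c in commands if not c.startswith(('set ', 'delete '))]
    updates = list(delete_cmds)
    updates += [c for c in set_cmds if c not in config_lines]
    # Re-apply set commands that a broader delete would otherwise wipe out.
    for c in set_cmds:
        search = re.sub('^set ', 'delete ', c)
        if any(search.startswith(d) for d in delete_cmds):
            updates.append(c)
    return updates, invalid

updates, invalid = simple_diff(
    ['set system host-name edge01', 'delete system ntp', 'bogus line'],
    ['set system host-name edge01'],
)
print(updates)  # ['delete system ntp']
print(invalid)  # ['bogus line']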
31,966 |
def main() -> None:
params = demisto.params()
credentials = params.get('credentials')
proxy = params.get('proxy', False)
insecure = params.get('insecure', False)
first_fetch_param = params.get('first_fetch')
first_fetch_datetime = arg_to_datetime(first_fetch_param) if first_fetch_param else None
first_fetch = first_fetch_datetime.timestamp() if first_fetch_datetime else None
base_url = params.get('base_url')
tlp_color = params.get('tlp_color')
include_deleted = params.get('include_deleted', False)
type = argToList(params.get('type'), 'ALL')
malicious_confidence = argToList(params.get('malicious_confidence'))
filter = params.get('filter')
generic_phrase = params.get('generic_phrase')
max_fetch = arg_to_number(params.get('max_indicator_to_fetch')) if params.get('max_indicator_to_fetch') else 10000
max_fetch = min(max_fetch, 10000)
feed_tags = argToList(params.get('feedTags'))
create_relationships = argToBoolean(params.get('create_relationships', True))
args = demisto.args()
try:
command = demisto.command()
demisto.info(f'Command being called is {demisto.command()}')
client = Client(
credentials=credentials,
base_url=base_url,
insecure=insecure,
proxy=proxy,
tlp_color=tlp_color,
feed_tags=feed_tags,
include_deleted=include_deleted,
type=type,
malicious_confidence=malicious_confidence,
filter=filter,
generic_phrase=generic_phrase,
limit=max_fetch,
first_fetch=first_fetch,
create_relationships=create_relationships
)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, args)
return_results(result)
elif command == 'fetch-indicators':
fetch_indicators_command(client=client)
elif command == 'crowdstrike-indicators-list':
return_results(crowdstrike_indicators_list_command(client, args))
elif command == "crowdstrike-reset-fetch-indicators":
return_results(reset_last_run())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
params = demisto.params()
credentials = params.get('credentials')
proxy = params.get('proxy', False)
insecure = params.get('insecure', False)
first_fetch_param = params.get('first_fetch')
first_fetch_datetime = arg_to_datetime(first_fetch_param) if first_fetch_param else None
first_fetch = first_fetch_datetime.timestamp() if first_fetch_datetime else None
base_url = params.get('base_url')
tlp_color = params.get('tlp_color')
include_deleted = params.get('include_deleted', False)
type = argToList(params.get('type'), 'ALL')
malicious_confidence = argToList(params.get('malicious_confidence'))
filter = params.get('filter')
generic_phrase = params.get('generic_phrase')
max_fetch = arg_to_number(params.get('max_indicator_to_fetch')) if params.get('max_indicator_to_fetch') else 10000
max_fetch = min(max_fetch, 10000)
feed_tags = argToList(params.get('feedTags'))
create_relationships = params.get('create_relationships', True)
args = demisto.args()
try:
command = demisto.command()
demisto.info(f'Command being called is {demisto.command()}')
client = Client(
credentials=credentials,
base_url=base_url,
insecure=insecure,
proxy=proxy,
tlp_color=tlp_color,
feed_tags=feed_tags,
include_deleted=include_deleted,
type=type,
malicious_confidence=malicious_confidence,
filter=filter,
generic_phrase=generic_phrase,
limit=max_fetch,
first_fetch=first_fetch,
create_relationships=create_relationships
)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, args)
return_results(result)
elif command == 'fetch-indicators':
fetch_indicators_command(client=client)
elif command == 'crowdstrike-indicators-list':
return_results(crowdstrike_indicators_list_command(client, args))
elif command == "crowdstrike-reset-fetch-indicators":
return_results(reset_last_run())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
4,625 |
def compute_fixed_effects(contrast_imgs, variance_imgs, mask=None,
precision_weighted=False, dofs=None):
"""Compute the fixed effects, given images of effects and variance
Parameters
----------
contrast_imgs : list of Nifti1Images or strings
The input contrast images.
variance_imgs : list of Nifti1Images or strings
The input variance images.
mask : Nifti1Image or NiftiMasker instance or None, optional
Mask image. If None, it is recomputed from contrast_imgs.
precision_weighted : Bool, optional
Whether fixed effects estimates should be weighted by inverse
variance or not. Default=False.
    dofs : array-like with len = len(variance_imgs), or None
the degrees of freedom of the models
when None, it is assumed that the degrees of freedom are 100 per input.
Returns
-------
fixed_fx_contrast_img : Nifti1Image
The fixed effects contrast computed within the mask.
fixed_fx_variance_img : Nifti1Image
The fixed effects variance computed within the mask.
fixed_fx_t_img : Nifti1Image
The fixed effects stat computed within the mask.
fixed_fx_z_score_img : Nifti1Image
The fixed effects corresponding z-transform
Notes
-----
This function is experimental.
It may change in any future release of Nilearn.
"""
n_runs = len(contrast_imgs)
if n_runs != len(variance_imgs):
raise ValueError(
'The number of contrast images (%d) '
'differs from the number of variance images (%d). '
% (n_runs, len(variance_imgs))
)
if isinstance(mask, NiftiMasker):
masker = mask.fit()
elif mask is None:
masker = NiftiMasker().fit(contrast_imgs)
else:
masker = NiftiMasker(mask_img=mask).fit()
variances = masker.transform(variance_imgs)
contrasts = np.array([masker.transform(contrast_img)
for contrast_img in contrast_imgs])
if dofs is not None:
if len(dofs) != n_runs:
raise ValueError(
'The number of dofs (%d) '
'differs from the number of contrast images (%d). '
            % (len(dofs), n_runs)
)
else:
dofs = [100] * n_runs
(fixed_fx_contrast, fixed_fx_variance, fixed_fx_stat, fixed_fx_z_score)\
= _compute_fixed_effects_params(
contrasts, variances, precision_weighted, dofs)
fixed_fx_contrast_img = masker.inverse_transform(fixed_fx_contrast)
fixed_fx_variance_img = masker.inverse_transform(fixed_fx_variance)
fixed_fx_stat_img = masker.inverse_transform(fixed_fx_stat)
fixed_fx_z_score_img = masker.inverse_transform(fixed_fx_z_score)
return (fixed_fx_contrast_img, fixed_fx_variance_img,
fixed_fx_stat_img, fixed_fx_z_score_img)
|
def compute_fixed_effects(contrast_imgs, variance_imgs, mask=None,
precision_weighted=False, dofs=None):
"""Compute the fixed effects, given images of effects and variance
Parameters
----------
contrast_imgs : list of Nifti1Images or strings
The input contrast images.
variance_imgs : list of Nifti1Images or strings
The input variance images.
mask : Nifti1Image or NiftiMasker instance or None, optional
Mask image. If None, it is recomputed from contrast_imgs.
precision_weighted : Bool, optional
Whether fixed effects estimates should be weighted by inverse
variance or not. Default=False.
    dofs : array-like with len = len(variance_imgs), or None
the degrees of freedom of the models
when None, it is assumed that the degrees of freedom are 100 per input.
Returns
-------
fixed_fx_contrast_img : Nifti1Image
The fixed effects contrast computed within the mask.
fixed_fx_variance_img : Nifti1Image
The fixed effects variance computed within the mask.
fixed_fx_t_img : Nifti1Image
The fixed effects stat computed within the mask.
fixed_fx_z_score_img : Nifti1Image
The fixed effects corresponding z-transform
Notes
-----
This function is experimental.
It may change in any future release of Nilearn.
"""
n_runs = len(contrast_imgs)
if n_runs != len(variance_imgs):
raise ValueError(
'The number of contrast images (%d) '
'differs from the number of variance images (%d). '
% (n_runs, len(variance_imgs))
)
if isinstance(mask, NiftiMasker):
masker = mask.fit()
elif mask is None:
masker = NiftiMasker().fit(contrast_imgs)
else:
masker = NiftiMasker(mask_img=mask).fit()
variances = masker.transform(variance_imgs)
contrasts = np.array([masker.transform(contrast_img)
for contrast_img in contrast_imgs])
if dofs is not None:
if len(dofs) != n_runs:
raise ValueError(
'The number of dofs (%d) '
'differs from the number of contrast images (%d). '
% (len(dofs), n_runs)
)
else:
dofs = [100] * n_runs
(fixed_fx_contrast, fixed_fx_variance, fixed_fx_stat, fixed_fx_z_score)\
= _compute_fixed_effects_params(
contrasts, variances, precision_weighted, dofs)
fixed_fx_contrast_img = masker.inverse_transform(fixed_fx_contrast)
fixed_fx_variance_img = masker.inverse_transform(fixed_fx_variance)
fixed_fx_stat_img = masker.inverse_transform(fixed_fx_stat)
fixed_fx_z_score_img = masker.inverse_transform(fixed_fx_z_score)
return (fixed_fx_contrast_img, fixed_fx_variance_img,
fixed_fx_stat_img, fixed_fx_z_score_img)
|
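The statistics behind the precision_weighted=True path are the usual inverse-variance fixed-effects formulas; the sketch below illustrates them with plain NumPy and is not a reproduction of Nilearn's internal _compute_fixed_effects_params.

import numpy as np

def precision_weighted_fixed_effects(contrasts, variances):
    """Combine per-run contrasts with inverse-variance (precision) weights."""
    contrasts = np.asarray(contrasts, dtype=float)
    weights = 1.0 / np.asarray(variances, dtype=float)
    fx_variance = 1.0 / weights.sum(axis=0)               # combined variance
    fx_contrast = (weights * contrasts).sum(axis=0) * fx_variance
    fx_stat = fx_contrast / np.sqrt(fx_variance)           # t-like statistic
    return fx_contrast, fx_variance, fx_stat

con, var, stat = precision_weighted_fixed_effects(
    contrasts=[[2.0, 1.0], [4.0, 3.0]],   # two runs, two voxels
    variances=[[1.0, 1.0], [1.0, 1.0]],
)
print(con)  # [3. 2.]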
37,555 |
def _split_runs_on_parameters(runs):
"""Finds runs containing parameterized gates and splits them into sequential
runs excluding the parameterized gates.
"""
out = []
for run in runs:
groups = groupby(run, lambda x: x.op.is_parameterized() and x.op.name == "u3")
for group_is_parameterized, gates in groups:
if not group_is_parameterized:
out.append(list(gates))
return out
|
def _split_runs_on_parameters(runs):
"""Finds runs containing parameterized gates and splits them into sequential
runs excluding the parameterized gates.
"""
out = []
for run in runs:
groups = groupby(run, lambda x: x.op.is_parameterized() and x.op.name in ("u3", "u"))
for group_is_parameterized, gates in groups:
if not group_is_parameterized:
out.append(list(gates))
return out
|
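The splitting relies on itertools.groupby keying each gate by whether it is a parameterized u3/u gate. The sketch below uses lightweight namedtuple stand-ins instead of Qiskit DAG nodes to show the behaviour.

from itertools import groupby
from collections import namedtuple

Gate = namedtuple("Gate", ["name", "parameterized"])

def split_run(run):
    # Keep only the maximal sub-runs containing no parameterized u3/u gate.
    out = []
    for is_param, gates in groupby(
        run, key=lambda g: g.parameterized and g.name in ("u3", "u")
    ):
        if not is_param:
            out.append(list(gates))
    return out

run = [Gate("u3", False), Gate("u", True), Gate("u3", False), Gate("cx", False)]
print([[g.name for g in chunk] for chunk in split_run(run)])
# [['u3'], ['u3', 'cx']]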
42,933 |
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> go.Figure:
""" Creates a plot.ly plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): list of nodes comprising the subgraph to highlight
        size (float): size of the plot
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> go.Figure:
"""Creates a `Plotly <https://plot.ly/>`__ plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): list of nodes comprising the subgraph to highlight
        size (float): size of the plot
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
14,574 |
def launcher():
"""Starts eye processes. Hosts the IPC Backbone and Logging functions.
Reacts to notifications:
``launcher_process.should_stop``: Stops the launcher process
``eye_process.should_start``: Starts the eye process
"""
# Reliable msg dispatch to the IPC via push bridge.
def pull_pub(ipc_pub_url, pull):
ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.connect(ipc_pub_url)
while True:
m = pull.recv_multipart()
pub.send_multipart(m)
    # The delay proxy handles delayed notifications.
def delay_proxy(ipc_pub_url, ipc_sub_url):
ctx = zmq.Context.instance()
sub = zmq_tools.Msg_Receiver(ctx, ipc_sub_url, ("delayed_notify",))
pub = zmq_tools.Msg_Dispatcher(ctx, ipc_pub_url)
poller = zmq.Poller()
poller.register(sub.socket, zmq.POLLIN)
waiting_notifications = {}
TOPIC_CUTOFF = len("delayed_")
while True:
if poller.poll(timeout=250):
# Recv new delayed notification and store it.
topic, n = sub.recv()
n["__notify_time__"] = time() + n["delay"]
waiting_notifications[n["subject"]] = n
# When a notifications time has come, pop from dict and send it as notification
for s, n in list(waiting_notifications.items()):
if n["__notify_time__"] < time():
n["topic"] = n["topic"][TOPIC_CUTOFF:]
del n["__notify_time__"]
del n["delay"]
del waiting_notifications[s]
pub.notify(n)
# Recv log records from other processes.
def log_loop(ipc_sub_url, log_level_debug):
import logging
# Get the root logger
logger = logging.getLogger()
# set log level
logger.setLevel(logging.NOTSET)
# Stream to file
fh = logging.FileHandler(
os.path.join(user_dir, "{}.log".format(parsed_args.app)),
mode="w",
encoding="utf-8",
)
fh.setFormatter(
logging.Formatter(
"%(asctime)s - %(processName)s - [%(levelname)s] %(name)s: %(message)s"
)
)
logger.addHandler(fh)
# Stream to console.
ch = logging.StreamHandler()
ch.setFormatter(
logging.Formatter("%(processName)s - [%(levelname)s] %(name)s: %(message)s")
)
if log_level_debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
logger.addHandler(ch)
# IPC setup to receive log messages. Use zmq_tools.ZMQ_handler to send messages to here.
sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("logging",))
while True:
topic, msg = sub.recv()
record = logging.makeLogRecord(msg)
logger.handle(record)
## IPC
timebase = Value(c_double, 0)
eye_procs_alive = Value(c_bool, 0), Value(c_bool, 0)
zmq_ctx = zmq.Context()
# Let the OS choose the IP and PORT
ipc_pub_url = "tcp://*:*"
ipc_sub_url = "tcp://*:*"
ipc_push_url = "tcp://*:*"
# Binding IPC Backbone Sockets to URLs.
# They are used in the threads started below.
# Using them in the main thread is not allowed.
xsub_socket = zmq_ctx.socket(zmq.XSUB)
xsub_socket.bind(ipc_pub_url)
ipc_pub_url = xsub_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
xpub_socket = zmq_ctx.socket(zmq.XPUB)
xpub_socket.bind(ipc_sub_url)
ipc_sub_url = xpub_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
pull_socket = zmq_ctx.socket(zmq.PULL)
pull_socket.bind(ipc_push_url)
ipc_push_url = pull_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
# Starting communication threads:
# A ZMQ Proxy Device serves as our IPC Backbone
ipc_backbone_thread = Thread(target=zmq.proxy, args=(xsub_socket, xpub_socket))
ipc_backbone_thread.setDaemon(True)
ipc_backbone_thread.start()
pull_pub = Thread(target=pull_pub, args=(ipc_pub_url, pull_socket))
pull_pub.setDaemon(True)
pull_pub.start()
log_thread = Thread(target=log_loop, args=(ipc_sub_url, parsed_args.debug))
log_thread.setDaemon(True)
log_thread.start()
delay_thread = Thread(target=delay_proxy, args=(ipc_push_url, ipc_sub_url))
delay_thread.setDaemon(True)
delay_thread.start()
del xsub_socket, xpub_socket, pull_socket
topics = (
"notify.eye_process.",
"notify.player_process.",
"notify.world_process.",
"notify.service_process",
"notify.clear_settings_process.",
"notify.player_drop_process.",
"notify.launcher_process.",
"notify.meta.should_doc",
"notify.circle_detector_process.should_start",
"notify.ipc_startup",
)
cmd_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=topics)
cmd_push = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
while True:
        # Wait until subscriptions are successful
cmd_push.notify({"subject": "ipc_startup"})
if cmd_sub.socket.poll(timeout=50):
cmd_sub.recv()
break
import logging
if unknown_args:
logging.warning(f"Unknown command-line arguments: {unknown_args}")
if parsed_args.app == "service":
cmd_push.notify({"subject": "service_process.should_start"})
elif parsed_args.app == "capture":
cmd_push.notify({"subject": "world_process.should_start"})
elif parsed_args.app == "player":
rec_dir = os.path.expanduser(parsed_args.recording)
cmd_push.notify(
{"subject": "player_drop_process.should_start", "rec_dir": rec_dir}
)
with Prevent_Idle_Sleep():
while True:
# listen for relevant messages.
if cmd_sub.socket.poll(timeout=1000):
topic, n = cmd_sub.recv()
if "notify.eye_process.should_start" in topic:
eye_id = n["eye_id"]
hwm = n["hwm"] if "hwm" in n else None
Process(
target=eye,
name="eye{}".format(eye_id),
args=(
timebase,
eye_procs_alive[eye_id],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
eye_id,
n.get("overwrite_cap_settings"),
parsed_args.hide_ui,
hwm,
),
).start()
elif "notify.player_process.should_start" in topic:
Process(
target=player,
name="player",
args=(
n["rec_dir"],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
),
).start()
elif "notify.world_process.should_start" in topic:
Process(
target=world,
name="world",
args=(
timebase,
eye_procs_alive,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
parsed_args.port,
parsed_args.hide_ui,
),
).start()
elif "notify.clear_settings_process.should_start" in topic:
Process(
target=clear_settings, name="clear_settings", args=(user_dir,)
).start()
elif "notify.service_process.should_start" in topic:
Process(
target=service,
name="service",
args=(
timebase,
eye_procs_alive,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
parsed_args.port,
parsed_args.hide_ui,
),
).start()
elif "notify.player_drop_process.should_start" in topic:
Process(
target=player_drop,
name="player",
args=(
n["rec_dir"],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
),
).start()
elif "notify.circle_detector_process.should_start" in topic:
Process(
target=circle_detector,
name="circle_detector",
args=(ipc_push_url, n["pair_url"], n["source_path"]),
).start()
elif "notify.meta.should_doc" in topic:
cmd_push.notify(
{
"subject": "meta.doc",
"actor": "launcher",
"doc": launcher.__doc__,
}
)
else:
if not active_children():
break
for p in active_children():
p.join()
|
def launcher():
"""Starts eye processes. Hosts the IPC Backbone and Logging functions.
Reacts to notifications:
``launcher_process.should_stop``: Stops the launcher process
``eye_process.should_start``: Starts the eye process
"""
# Reliable msg dispatch to the IPC via push bridge.
def pull_pub(ipc_pub_url, pull):
ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.connect(ipc_pub_url)
while True:
m = pull.recv_multipart()
pub.send_multipart(m)
    # The delay proxy handles delayed notifications.
def delay_proxy(ipc_pub_url, ipc_sub_url):
ctx = zmq.Context.instance()
sub = zmq_tools.Msg_Receiver(ctx, ipc_sub_url, ("delayed_notify",))
pub = zmq_tools.Msg_Dispatcher(ctx, ipc_pub_url)
poller = zmq.Poller()
poller.register(sub.socket, zmq.POLLIN)
waiting_notifications = {}
TOPIC_CUTOFF = len("delayed_")
while True:
if poller.poll(timeout=250):
# Recv new delayed notification and store it.
topic, n = sub.recv()
n["__notify_time__"] = time() + n["delay"]
waiting_notifications[n["subject"]] = n
# When a notifications time has come, pop from dict and send it as notification
for s, n in list(waiting_notifications.items()):
if n["__notify_time__"] < time():
n["topic"] = n["topic"][TOPIC_CUTOFF:]
del n["__notify_time__"]
del n["delay"]
del waiting_notifications[s]
pub.notify(n)
# Recv log records from other processes.
def log_loop(ipc_sub_url, log_level_debug):
import logging
# Get the root logger
logger = logging.getLogger()
# set log level
logger.setLevel(logging.NOTSET)
# Stream to file
fh = logging.FileHandler(
os.path.join(user_dir, "{}.log".format(parsed_args.app)),
mode="w",
encoding="utf-8",
)
fh.setFormatter(
logging.Formatter(
"%(asctime)s - %(processName)s - [%(levelname)s] %(name)s: %(message)s"
)
)
logger.addHandler(fh)
# Stream to console.
ch = logging.StreamHandler()
ch.setFormatter(
logging.Formatter("%(processName)s - [%(levelname)s] %(name)s: %(message)s")
)
if log_level_debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
logger.addHandler(ch)
# IPC setup to receive log messages. Use zmq_tools.ZMQ_handler to send messages to here.
sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("logging",))
while True:
topic, msg = sub.recv()
record = logging.makeLogRecord(msg)
logger.handle(record)
## IPC
timebase = Value(c_double, 0)
eye_procs_alive = Value(c_bool, 0), Value(c_bool, 0)
zmq_ctx = zmq.Context()
# Let the OS choose the IP and PORT
ipc_pub_url = "tcp://*:*"
ipc_sub_url = "tcp://*:*"
ipc_push_url = "tcp://*:*"
# Binding IPC Backbone Sockets to URLs.
# They are used in the threads started below.
# Using them in the main thread is not allowed.
xsub_socket = zmq_ctx.socket(zmq.XSUB)
xsub_socket.bind(ipc_pub_url)
ipc_pub_url = xsub_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
xpub_socket = zmq_ctx.socket(zmq.XPUB)
xpub_socket.bind(ipc_sub_url)
ipc_sub_url = xpub_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
pull_socket = zmq_ctx.socket(zmq.PULL)
pull_socket.bind(ipc_push_url)
ipc_push_url = pull_socket.last_endpoint.decode("utf8").replace(
"0.0.0.0", "127.0.0.1"
)
# Starting communication threads:
# A ZMQ Proxy Device serves as our IPC Backbone
ipc_backbone_thread = Thread(target=zmq.proxy, args=(xsub_socket, xpub_socket))
ipc_backbone_thread.setDaemon(True)
ipc_backbone_thread.start()
pull_pub = Thread(target=pull_pub, args=(ipc_pub_url, pull_socket))
pull_pub.setDaemon(True)
pull_pub.start()
log_thread = Thread(target=log_loop, args=(ipc_sub_url, parsed_args.debug))
log_thread.setDaemon(True)
log_thread.start()
delay_thread = Thread(target=delay_proxy, args=(ipc_push_url, ipc_sub_url))
delay_thread.setDaemon(True)
delay_thread.start()
del xsub_socket, xpub_socket, pull_socket
topics = (
"notify.eye_process.",
"notify.player_process.",
"notify.world_process.",
"notify.service_process",
"notify.clear_settings_process.",
"notify.player_drop_process.",
"notify.launcher_process.",
"notify.meta.should_doc",
"notify.circle_detector_process.should_start",
"notify.ipc_startup",
)
cmd_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=topics)
cmd_push = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
while True:
        # Wait until subscriptions are successful
cmd_push.notify({"subject": "ipc_startup"})
if cmd_sub.socket.poll(timeout=50):
cmd_sub.recv()
break
import logging
if unknown_args:
logging.warning(f"Unknown command-line arguments: {unknown_args}")
if parsed_args.app == "service":
cmd_push.notify({"subject": "service_process.should_start"})
elif parsed_args.app == "capture":
cmd_push.notify({"subject": "world_process.should_start"})
elif parsed_args.app == "player":
rec_dir = os.path.expanduser(parsed_args.recording)
cmd_push.notify(
{"subject": "player_drop_process.should_start", "rec_dir": rec_dir}
)
with Prevent_Idle_Sleep():
while True:
# listen for relevant messages.
if cmd_sub.socket.poll(timeout=1000):
topic, n = cmd_sub.recv()
if "notify.eye_process.should_start" in topic:
eye_id = n["eye_id"]
hwm = n["hwm"] if "hwm" in n else None
Process(
target=eye,
name="eye{}".format(eye_id),
args=(
timebase,
eye_procs_alive[eye_id],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
eye_id,
n.get("overwrite_cap_settings"),
parsed_args.hide_ui,
                        hwm,
),
).start()
elif "notify.player_process.should_start" in topic:
Process(
target=player,
name="player",
args=(
n["rec_dir"],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
),
).start()
elif "notify.world_process.should_start" in topic:
Process(
target=world,
name="world",
args=(
timebase,
eye_procs_alive,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
parsed_args.port,
parsed_args.hide_ui,
),
).start()
elif "notify.clear_settings_process.should_start" in topic:
Process(
target=clear_settings, name="clear_settings", args=(user_dir,)
).start()
elif "notify.service_process.should_start" in topic:
Process(
target=service,
name="service",
args=(
timebase,
eye_procs_alive,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
parsed_args.port,
parsed_args.hide_ui,
),
).start()
elif "notify.player_drop_process.should_start" in topic:
Process(
target=player_drop,
name="player",
args=(
n["rec_dir"],
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
app_version,
),
).start()
elif "notify.circle_detector_process.should_start" in topic:
Process(
target=circle_detector,
name="circle_detector",
args=(ipc_push_url, n["pair_url"], n["source_path"]),
).start()
elif "notify.meta.should_doc" in topic:
cmd_push.notify(
{
"subject": "meta.doc",
"actor": "launcher",
"doc": launcher.__doc__,
}
)
else:
if not active_children():
break
for p in active_children():
p.join()
|
59,615 |
def _is_valid_resolution(resolution):
"""
Check if a resolution is valid for the global Earth relief grid.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Raises
------
GMTInvalidInput
If given resolution is not valid.
Examples
--------
>>> _is_valid_resolution("01d")
>>> _is_valid_resolution("60m")
>>> _is_valid_resolution("5m")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '5m'.
>>> _is_valid_resolution("15s")
>>> _is_valid_resolution("01s")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '01s'.
"""
valid_resolutions = ["01d"]
valid_resolutions.extend(
["{:02d}m".format(res) for res in [60, 30, 20, 15, 10, 6, 5, 4, 3, 2, 1]]
)
valid_resolutions.extend(["{:02d}s".format(res) for res in [30, 15]])
if resolution not in valid_resolutions:
raise GMTInvalidInput(
"Invalid Earth relief resolution '{}'.".format(resolution)
)
|
def _is_valid_resolution(resolution):
"""
Check if a resolution is valid for the global Earth relief grid.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Raises
------
GMTInvalidInput
If given resolution is not valid.
Examples
--------
>>> _is_valid_resolution("01d")
>>> _is_valid_resolution("5m")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '5m'.
>>> _is_valid_resolution("15s")
>>> _is_valid_resolution("01s")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '01s'.
"""
valid_resolutions = ["01d"]
valid_resolutions.extend(
["{:02d}m".format(res) for res in [60, 30, 20, 15, 10, 6, 5, 4, 3, 2, 1]]
)
valid_resolutions.extend(["{:02d}s".format(res) for res in [30, 15]])
if resolution not in valid_resolutions:
raise GMTInvalidInput(
"Invalid Earth relief resolution '{}'.".format(resolution)
)
|
44,082 |
def gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc):
r"""Compute one-dimensional multipole moment integral for two primitive Gaussian functions.
The multipole moment integral in one dimension is defined as
.. math::
S_{ij}^e = \left \langle G_i | q_C^e | G_j \right \rangle,
where :math:`G` is a Gaussian function at dimension :math:`q = x, y, z` of the Cartesian
coordinates system, :math:`e` is the multipole moment order and :math:`C` is the origin of the
    Cartesian coordinates. The integrals can be evaluated as
[`Helgaker (1995) p803 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
S_{ij}^e = \sum_{t=0}^{\mathrm{min}(i+j, \ e)} E_t^{ij} M_t^e,
where :math:`E` and :math:`M` are the Hermite Gaussian expansion coefficient and the Hermite
moment integral, respectively, that can be computed recursively.
Args:
la (integer): angular momentum for the first Gaussian function
lb (integer): angular momentum for the second Gaussian function
ra (float): position of the first Gaussian function
rb (float): position of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
e (integer): order of the multipole moment
rc (array[float]): distance between the center of the Hermite Gaussian and the origin
Returns:
array[float]: one-dimensional multipole moment integral between primitive Gaussian functions
**Example**
>>> la, lb = 0, 0
>>> ra, rb = np.array([2.0]), np.array([2.0])
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> e = 1
>>> rc = 1.5
>>> gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc)
array([1.0157925])
"""
s = 0.0
for t in range(min(la + lb + 1, e + 1)):
s = s + expansion(la, lb, ra, rb, alpha, beta, t) * _hermite_moment(alpha, beta, t, e, rc)
return s
|
def gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc):
r"""Compute one-dimensional multipole moment integral for two primitive Gaussian functions.
The multipole moment integral in one dimension is defined as
.. math::
S_{ij}^e = \left \langle G_i | q_C^e | G_j \right \rangle,
where :math:`G` is a Gaussian function at dimension :math:`q = x, y, z` of the Cartesian
coordinates system, :math:`e` is the multipole moment order and :math:`C` is the origin of the
Cartesian coordinates. The integrals can be evaluated as
[`Helgaker (1995) p803 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
S_{ij}^e = \sum_{t=0}^{\mathrm{min}(i+j, \ e)} E_t^{ij} M_t^e,
where :math:`E` and :math:`M` are the Hermite Gaussian expansion coefficient and the Hermite
moment integral, respectively, that can be computed recursively.
Args:
la (integer): angular momentum for the first Gaussian function
lb (integer): angular momentum for the second Gaussian function
ra (float): position of the first Gaussian function
rb (float): position of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
e (integer): order of the multipole moment
rc (array[float]): distance between the center of the Hermite Gaussian and the origin
Returns:
array[float]: one-dimensional multipole moment integral between primitive Gaussian functions
**Example**
>>> la, lb = 0, 0
>>> ra, rb = np.array([2.0]), np.array([2.0])
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> e = 1
>>> rc = 1.5
>>> gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc)
array([1.0157925])
"""
s = 0.0
for t in range(min(la + lb + 1, e + 1)):
s = s + expansion(la, lb, ra, rb, alpha, beta, t) * _hermite_moment(alpha, beta, t, e, rc)
return s
|
56,388 |
def generate_matrix(shape, dtype=float, **kwargs):
r"""Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
test that can be instable when the input value behaves bad.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
            'shape {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
|
def generate_matrix(shape, dtype=float, **kwargs):
r"""generate_matrix(shape, dtype=float, *, singular_values)
Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
test that can be instable when the input value behaves bad.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
            'shape {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
|
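The core trick above is U · diag(s) · Vh with orthonormal factors taken from the SVD of a random matrix. A self-contained check with plain NumPy (without Chainer's argument.parse_kwargs plumbing):

import numpy

shape = (4, 3)
target = numpy.array([3.0, 2.0, 1.0])   # requested singular values

a = numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
m = numpy.einsum('...ik,...k,...kj->...ij', u, target, vh)

# The constructed matrix has exactly the requested singular values.
print(numpy.round(numpy.linalg.svd(m, compute_uv=False), 6))  # [3. 2. 1.]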
24,242 |
def has_legacy_signature(check):
for path, _, files in os.walk(get_check_directory(check)):
for fn in files:
if fn.endswith('.py'):
with open(os.path.join(path, fn)) as test_file:
for line in test_file:
if "init" in line and "agentConfig" in line:
return True
return False
|
def has_legacy_signature(check):
for path, _, files in os.walk(get_check_directory(check)):
for fn in files:
if fn.endswith('.py'):
with open(os.path.join(path, fn)) as test_file:
for line in test_file:
if "__init__" in line and "agentConfig" in line:
return True
return False
|
33,018 |
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
        Values may be strings, lists of strings, or None, if all values should be returned for a given key.
        By default, all POIs with an 'amenity' key of any value will be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
    # initiate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
                # get features without these keys, no matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
                    # can only be added one at a time without key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values may be string, lists of strings, or None, if all values should be returned for a given key.
        By default, all POIs with an 'amenity' key of any value will be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
    # initiate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
                # get features without these keys, no matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
                    # can only be added one at a time without key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
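The function above is pure string assembly, so the resulting Overpass QL can be inspected without any network call. A simplified, self-contained sketch of the same construction for a single key with a list of allowed values (the helper name, coordinates, and tag values below are made up for illustration):

def tiny_poi_query(north, south, east, west, key, values, timeout=180):
    # One bounding box, one tag key, node/way/relation selected via 'nwr'.
    extent = '({:.6f},{:.6f},{:.6f},{:.6f});(._;>;);'.format(south, west, north, east)
    body = '(nwr["{}"~"{}"]{});'.format(key, '|'.join(values), extent)
    return '[out:json][timeout:{}];({});out;'.format(timeout, body)

print(tiny_poi_query(40.72, 40.70, -73.99, -74.01, 'amenity', ['cafe', 'restaurant']))
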
31,208 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
return_results(mantis_get_all_issues_command(client, args))
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
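The main() above is the usual single-dispatch layout for an integration: read configuration once, build a client, then branch on the command name. A generic, self-contained sketch of the same branching expressed as a lookup table (the command names and handlers here are illustrative and not tied to any particular SDK):

def dispatch(command, client, args, handlers):
    # Table-driven equivalent of a long if/elif chain over command names.
    try:
        handler = handlers[command]
    except KeyError:
        raise ValueError('Unknown command: {}'.format(command))
    return handler(client, args)

handlers = {
    'test-module': lambda client, args: 'ok',
    'get-issue-by-id': lambda client, args: {'id': args.get('id')},
}
print(dispatch('test-module', client=None, args={}, handlers=handlers))
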
38,565 |
def compute_normal(
pts: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-5
) -> np.ndarray:
"""Compute the normal of a set of points. The sign of the normal is arbitrary
The algorithm assume that the points lie on a plane.
Three non-aligned points are required.
Parameters:
pts: np.ndarray, 3xn, the points. Need n > 2.
tol: Absolute tolerance used to detect essentially collinear points.
Returns:
normal: np.array, 1x3, the normal.
"""
if pts.shape[1] <= 2:
raise ValueError("in compute_normal: pts.shape[1] must be larger than 2")
# Center of the point cloud, and vectors from the center to all points
center = pts.mean(axis=1).reshape((-1, 1))
v = pts - center
# To do the cross product, we need two vectors in the plane of the point cloud.
# In an attempt at minimizing the vulnerabilities with respect to rounding errors,
# the vectors should be carefully chosen.
# As the first vector, chose the longest one.
# Norm of all vectors
nrm = np.linalg.norm(v, axis=0)
# Index of the longest vector (will be needed below)
v1_ind = np.argmax(nrm)
v1 = v[:, v1_ind]
# Next, compute the cross product between the longest vector and all vectors in
# the plane
cross = np.array(
[
v1[1] * v[2] - v1[2] * v[1],
v1[2] * v[0] - v1[0] * v[2],
v1[0] * v[1] - v1[1] * v[0],
]
)
# Find the index of the longest cross product, and thereby of the vector in v that
# produced the longest vector.
cross_ind = np.argmax(np.linalg.norm(cross, axis=0))
# Pick out the normal vector, using the longest normal
normal = cross[:, cross_ind]
# Check on the computation, if the cross product is essentially zero, the points
# are collinear, and the computation is not to be trusted.
# Need to use absolute tolerance when invoking np.allclose, since relative tolerance
# makes no sense when comparing with a zero vector - see numpy.allclose
# documentation for details.
# Still, the tolerance should be scaled with the size of v1 and v[:, cross_ind]
# (the cross product axb = |a||b|sin(theta) - to detect a small theta, we need to
# scale the cross product by the lengths of a and b).
nrm_scaling = nrm[v1_ind] * nrm[cross_ind]
if np.allclose(normal, np.zeros(3), atol=tol * nrm_scaling):
raise RuntimeError(
"Unable to calculate normal from point set. Are all points collinear?"
)
return normal / np.linalg.norm(normal)
|
def compute_normal(
pts: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-5
) -> np.ndarray:
"""Compute the normal of a set of points. The sign of the normal is arbitrary
The algorithm assume that the points lie on a plane.
Three non-aligned points are required.
Parameters:
pts: np.ndarray, 3xn, the points. Need n > 2.
tol: Absolute tolerance used to detect essentially collinear points.
Returns:
normal: np.array, 1x3, the normal.
"""
if pts.shape[1] <= 2:
raise ValueError("in compute_normal: pts.shape[1] must be larger than 2")
# Center of the point cloud, and vectors from the center to all points
center = pts.mean(axis=1).reshape((-1, 1))
v = pts - center
# To do the cross product, we need two vectors in the plane of the point cloud.
# In an attempt at minimizing the vulnerabilities with respect to rounding errors,
# the vectors should be carefully chosen.
# As the first vector, choose the longest one.
# Norm of all vectors
nrm = np.linalg.norm(v, axis=0)
# Index of the longest vector (will be needed below)
v1_ind = np.argmax(nrm)
v1 = v[:, v1_ind]
# Next, compute the cross product between the longest vector and all vectors in
# the plane
cross = np.array(
[
v1[1] * v[2] - v1[2] * v[1],
v1[2] * v[0] - v1[0] * v[2],
v1[0] * v[1] - v1[1] * v[0],
]
)
# Find the index of the longest cross product, and thereby of the vector in v that
# produced the longest vector.
cross_ind = np.argmax(np.linalg.norm(cross, axis=0))
# Pick out the normal vector, using the longest normal
normal = cross[:, cross_ind]
# Check on the computation, if the cross product is essentially zero, the points
# are collinear, and the computation is not to be trusted.
# Need to use absolute tolerance when invoking np.allclose, since relative tolerance
# makes no sense when comparing with a zero vector - see numpy.allclose
# documentation for details.
# Still, the tolerance should be scaled with the size of v1 and v[:, cross_ind]
# (the cross product axb = |a||b|sin(theta) - to detect a small theta, we need to
# scale the cross product by the lengths of a and b).
nrm_scaling = nrm[v1_ind] * nrm[cross_ind]
if np.allclose(normal, np.zeros(3), atol=tol * nrm_scaling):
raise RuntimeError(
"Unable to calculate normal from point set. Are all points collinear?"
)
return normal / np.linalg.norm(normal)
|
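compute_normal above guards against rounding error by crossing the longest in-plane vector with all the others and keeping the longest cross product. For intuition, a shorter (and less careful) self-contained sketch of the same geometric fact, using an SVD of the centered point cloud instead of cross products: the direction of least variance is orthogonal to the plane.

import numpy as np

def plane_normal_svd(pts):
    # pts: 3 x n array of points assumed to lie (approximately) on a plane.
    centered = pts - pts.mean(axis=1, keepdims=True)
    u, s, _ = np.linalg.svd(centered, full_matrices=False)
    return u[:, -1]  # left singular vector of the smallest singular value

pts = np.array([[0., 1., 0., 1.],
                [0., 0., 1., 1.],
                [0., 0., 0., 0.]])   # four points on the z = 0 plane
print(plane_normal_svd(pts))         # approximately [0, 0, +/-1]
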
10,569 |
def test_ansible_git_version(capsys, mocker):
mocker.patch("ansible.cli.arguments.option_helpers._git_repo_info",
return_value="ansible 2.11.0.dev0 (devel 8a202cae3e) last updated 2021/01/11 10:24:38 (GMT +200)")
adhoc_cli = AdHocCLI(args=['/bin/ansible', '--version'])
with pytest.raises(SystemExit):
adhoc_cli.run()
version = capsys.readouterr()
try:
version_lines = version.out.splitlines()
except AttributeError:
# Python 2.6 does return a named tuple, so get the first item
version_lines = version[0].splitlines()
assert len(version_lines) == 9, 'Incorrect number of lines in "ansible --version" output'
assert re.match('ansible [0-9.a-z]+ .*$', version_lines[0]), 'Incorrect ansible version line in "ansible --version" output'
assert re.match(' config file = .*$', version_lines[1]), 'Incorrect config file line in "ansible --version" output'
assert re.match(' configured module search path = .*$', version_lines[2]), 'Incorrect module search path in "ansible --version" output'
assert re.match(' ansible python module location = .*$', version_lines[3]), 'Incorrect python module location in "ansible --version" output'
assert re.match(' ansible collection location = .*$', version_lines[4]), 'Incorrect collection location in "ansible --version" output'
    assert re.match(' executable location = .*$', version_lines[5]), 'Incorrect executable location in "ansible --version" output'
assert re.match(' python version = .*$', version_lines[6]), 'Incorrect python version in "ansible --version" output'
assert re.match(' jinja version = .*$', version_lines[7]), 'Incorrect jinja version in "ansible --version" output'
assert re.match(' libyaml = .*$', version_lines[8]), 'Missing libyaml in "ansible --version" output'
|
def test_ansible_git_version(capsys, mocker):
mocker.patch("ansible.cli.arguments.option_helpers._git_repo_info",
return_value="ansible 2.11.0.dev0 (devel 8a202cae3e) last updated 2021/01/11 10:24:38 (GMT +200)")
adhoc_cli = AdHocCLI(args=['/bin/ansible', '--version'])
with pytest.raises(SystemExit):
adhoc_cli.run()
version = capsys.readouterr()
try:
version_lines = version.out.splitlines()
except AttributeError:
# Python 2.6 does return a named tuple, so get the first item
version_lines = version[0].splitlines()
assert len(version_lines) == 9, 'Incorrect number of lines in "ansible --version" output'
assert re.match(r'ansible [0-9.a-z]+ .*$', version_lines[0]), 'Incorrect ansible version line in "ansible --version" output'
assert re.match(' config file = .*$', version_lines[1]), 'Incorrect config file line in "ansible --version" output'
assert re.match(' configured module search path = .*$', version_lines[2]), 'Incorrect module search path in "ansible --version" output'
assert re.match(' ansible python module location = .*$', version_lines[3]), 'Incorrect python module location in "ansible --version" output'
assert re.match(' ansible collection location = .*$', version_lines[4]), 'Incorrect collection location in "ansible --version" output'
    assert re.match(' executable location = .*$', version_lines[5]), 'Incorrect executable location in "ansible --version" output'
assert re.match(' python version = .*$', version_lines[6]), 'Incorrect python version in "ansible --version" output'
assert re.match(' jinja version = .*$', version_lines[7]), 'Incorrect jinja version in "ansible --version" output'
assert re.match(' libyaml = .*$', version_lines[8]), 'Missing libyaml in "ansible --version" output'
|
24,470 |
def initialize_instance(values, **kwargs):
for key, value in values.items():
if value is None or value == '':
if key in ['channel', 'queue_manager']: # These are required options
raise ValueError("'{}' cannot be empty.".format(key))
raise ValueError(
"'{}' cannot be empty. If you don't want to provide a value you can comment this option".format(key)
)
return values
|
def initialize_instance(values, **kwargs):
for key, value in values.items():
if value is None or value == '':
if key in ['channel', 'queue_manager']: # These are required options
raise ValueError("'{}' cannot be empty.".format(key))
raise ValueError(
"'{}' cannot be empty. If you don't want to provide a value you can comment out this option".format(key)
)
return values
|
14,708 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
home_data = HomeData(conf)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(conf, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
home_data = HomeData(auth)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(conf, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
7,168 |
def _get_peak_mask(image, min_distance, footprint, threshold_abs,
threshold_rel):
"""
Return the mask containing all peak candidates above thresholds
"""
if footprint is not None:
image_max = ndi.maximum_filter(image, footprint=footprint,
mode='constant')
else:
size = 2 * min_distance + 1
image_max = ndi.maximum_filter(image, size=size, mode='constant')
mask = image == image_max
if threshold_rel is not None:
threshold = max(threshold_abs, threshold_rel * image.max())
else:
threshold = threshold_abs
mask &= image > threshold
return mask
|
def _get_peak_mask(image, min_distance, footprint, threshold_abs,
threshold_rel):
"""
Return the mask containing all peak candidates above thresholds.
"""
if footprint is not None:
image_max = ndi.maximum_filter(image, footprint=footprint,
mode='constant')
else:
size = 2 * min_distance + 1
image_max = ndi.maximum_filter(image, size=size, mode='constant')
mask = image == image_max
if threshold_rel is not None:
threshold = max(threshold_abs, threshold_rel * image.max())
else:
threshold = threshold_abs
mask &= image > threshold
return mask
|
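The mask above marks a pixel as a peak candidate exactly when it equals the maximum of its own neighbourhood and exceeds the threshold. A small self-contained illustration on a toy image (the threshold and min_distance values are arbitrary examples):

import numpy as np
from scipy import ndimage as ndi

image = np.array([[0, 0, 0, 0, 0],
                  [0, 5, 0, 0, 0],
                  [0, 0, 0, 7, 0],
                  [0, 0, 0, 0, 0]], dtype=float)

min_distance = 1
size = 2 * min_distance + 1                      # 3x3 neighbourhood
image_max = ndi.maximum_filter(image, size=size, mode='constant')
mask = (image == image_max) & (image > 2.0)      # absolute threshold of 2.0
print(np.argwhere(mask))                         # [[1 1] [2 3]]
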
54,382 |
def handle_command(line, args, dryrun=False):
"""Handle a compilation command
Parameters
----------
line : iterable
an iterable with the compilation arguments
args : {object, namedtuple}
        a container with additional compilation options,
in particular containing ``args.cflags`` and ``args.ldflags``
dryrun : bool, default=False
if True do not run the resulting command, only return it
Examples
--------
>>> from collections import namedtuple
>>> Args = namedtuple('args', ['cflags', 'ldflags', 'host'])
>>> args = Args(cflags='', ldflags='', host='')
>>> handle_command(['gcc', 'test.c'], args, dryrun=True)
emcc test.c
['emcc', 'test.c']
"""
# This is a special case to skip the compilation tests in numpy that aren't
# actually part of the build
for arg in line:
if r"/file.c" in arg or "_configtest" in arg:
return
if re.match(r"/tmp/.*/source\.[bco]+", arg):
return
if arg == "-print-multiarch":
return
if arg.startswith("/tmp"):
return
if line[0] == "gfortran":
result = f2c(line)
if result is None:
return
line = result
new_args = ["emcc"]
elif line[0] == "ar":
new_args = ["emar"]
elif line[0] == "c++":
new_args = ["em++"]
else:
new_args = ["emcc"]
# distutils doesn't use the c++ compiler when compiling c++ <sigh>
if any(arg.endswith(".cpp") for arg in line):
new_args = ["em++"]
library_output = line[-1].endswith(".so")
if library_output:
new_args.extend(args.ldflags.split())
elif new_args[0] in ("emcc", "em++"):
new_args.extend(args.cflags.split())
lapack_dir = None
# Go through and adjust arguments
for arg in line[1:]:
if arg.startswith("-I"):
if (
str(Path(arg[2:]).resolve()).startswith(sys.prefix + "/include/python")
and "site-packages" not in arg
):
arg = arg.replace("-I" + sys.prefix, "-I" + args.target)
# Don't include any system directories
elif arg[2:].startswith("/usr"):
continue
# Don't include any system directories
if arg.startswith("-L/usr"):
continue
# threading is disabled for now
if arg == "-pthread":
continue
# On Mac, we need to omit some darwin-specific arguments
if arg in ["-bundle", "-undefined", "dynamic_lookup"]:
continue
# The native build is possibly multithreaded, but the emscripten one
# definitely isn't
arg = re.sub(r"/python([0-9]\.[0-9]+)m", r"/python\1", arg)
if arg.endswith(".o"):
arg = arg[:-2] + ".bc"
output = arg
elif arg.endswith(".so"):
arg = arg[:-3] + ".wasm"
output = arg
# Fix for scipy to link to the correct BLAS/LAPACK files
if arg.startswith("-L") and "CLAPACK-WA" in arg:
out_idx = line.index("-o")
out_idx += 1
module_name = line[out_idx]
module_name = Path(module_name).name.split(".")[0]
lapack_dir = arg.replace("-L", "")
            # For convenience we determine needed scipy link libraries
# here, instead of in patch files
link_libs = ["F2CLIBS/libf2c.bc", "blas_WA.bc"]
if module_name in [
"_flapack",
"_flinalg",
"_calc_lwork",
"cython_lapack",
"_iterative",
"_arpack",
]:
link_libs.append("lapack_WA.bc")
for lib_name in link_libs:
arg = os.path.join(lapack_dir, f"{lib_name}")
new_args.append(arg)
new_args.extend(["-s", "INLINING_LIMIT=5"])
continue
# Use -Os for files that are statically linked to CLAPACK
if (
arg.startswith("-O")
and "CLAPACK" in " ".join(line)
and "-L" in " ".join(line)
):
new_args.append("-Os")
continue
if new_args[-1].startswith("-B") and "compiler_compat" in arg:
# conda uses custom compiler search paths with the compiler_compat folder.
# Ignore it.
del new_args[-1]
continue
# See https://github.com/emscripten-core/emscripten/issues/8650
if arg == "-lfreetype":
continue
elif arg == "-lz":
continue
elif arg == "-lpng16":
continue
elif arg == "-lgfortran":
continue
new_args.append(arg)
# This can only be used for incremental rebuilds -- it generates
# an error during clean build of numpy
# if os.path.isfile(output):
# print('SKIPPING: ' + ' '.join(new_args))
# return
print(" ".join(new_args))
if not dryrun:
result = subprocess.run(new_args)
if result.returncode != 0:
sys.exit(result.returncode)
# Emscripten .so files shouldn't have the native platform slug
if library_output:
renamed = output[:-5] + ".so"
for ext in importlib.machinery.EXTENSION_SUFFIXES:
if ext == ".so":
continue
if renamed.endswith(ext):
renamed = renamed[: -len(ext)] + ".so"
break
if not dryrun:
os.rename(output, renamed)
return new_args
|
def handle_command(line, args, dryrun=False):
"""Handle a compilation command
Parameters
----------
line : iterable
an iterable with the compilation arguments
args : {object, namedtuple}
        a container with additional compilation options,
in particular containing ``args.cflags`` and ``args.ldflags``
dryrun : bool, default=False
if True do not run the resulting command, only return it
Examples
--------
>>> from collections import namedtuple
>>> Args = namedtuple('args', ['cflags', 'ldflags', 'host'])
>>> args = Args(cflags='', ldflags='', host='')
>>> handle_command(['gcc', 'test.c'], args, dryrun=True)
emcc test.c
['emcc', 'test.c']
"""
# This is a special case to skip the compilation tests in numpy that aren't
# actually part of the build
for arg in line:
if r"/file.c" in arg or "_configtest" in arg:
return
if re.match(r"/tmp/.*/source\.[bco]+", arg):
return
if arg == "-print-multiarch":
return
if arg.startswith("/tmp"):
return
if line[0] == "gfortran":
result = f2c(line)
if result is None:
return
line = result
new_args = ["emcc"]
elif line[0] == "ar":
new_args = ["emar"]
elif line[0] == "c++":
new_args = ["em++"]
else:
new_args = ["emcc"]
# distutils doesn't use the c++ compiler when compiling c++ <sigh>
if any(arg.endswith(".cpp") for arg in line):
new_args = ["em++"]
library_output = line[-1].endswith(".so")
if library_output:
new_args.extend(args.ldflags.split())
elif new_args[0] in ("emcc", "em++"):
new_args.extend(args.cflags.split())
lapack_dir = None
# Go through and adjust arguments
for arg in line[1:]:
if arg.startswith("-I"):
if (
str(Path(arg[2:]).resolve()).startswith(sys.prefix + "/include/python")
and "site-packages" not in arg
):
arg = arg.replace("-I" + sys.prefix, "-I" + args.target)
# Don't include any system directories
elif arg[2:].startswith("/usr"):
continue
# Don't include any system directories
if arg.startswith("-L/usr"):
continue
# threading is disabled for now
if arg == "-pthread":
continue
# On Mac, we need to omit some darwin-specific arguments
if arg in ["-bundle", "-undefined", "dynamic_lookup"]:
continue
# The native build is possibly multithreaded, but the emscripten one
# definitely isn't
arg = re.sub(r"/python([0-9]\.[0-9]+)m", r"/python\1", arg)
if arg.endswith(".o"):
arg = arg[:-2] + ".bc"
output = arg
elif arg.endswith(".so"):
arg = arg[:-3] + ".wasm"
output = arg
# Fix for scipy to link to the correct BLAS/LAPACK files
if arg.startswith("-L") and "CLAPACK-WA" in arg:
out_idx = line.index("-o")
out_idx += 1
module_name = line[out_idx]
module_name = Path(module_name).name.split(".")[0]
lapack_dir = arg.replace("-L", "")
            # For convenience we determine needed scipy link libraries
# here, instead of in patch files
link_libs = ["F2CLIBS/libf2c.bc", "blas_WA.bc"]
if module_name in [
"_flapack",
"_flinalg",
"_calc_lwork",
"cython_lapack",
"_iterative",
"_arpack",
]:
link_libs.append("lapack_WA.bc")
for lib_name in link_libs:
arg = os.path.join(lapack_dir, f"{lib_name}")
new_args.append(arg)
new_args.extend(["-s", "INLINING_LIMIT=5"])
continue
# Use -Os for files that are statically linked to CLAPACK
if (
arg.startswith("-O")
and "CLAPACK" in " ".join(line)
and "-L" in " ".join(line)
):
new_args.append("-Os")
continue
if new_args[-1].startswith("-B") and "compiler_compat" in arg:
# conda uses custom compiler search paths with the compiler_compat folder.
# Ignore it.
del new_args[-1]
continue
# See https://github.com/emscripten-core/emscripten/issues/8650
if arg in ["-lfreetype", "-lz", "-lpng16", "-lgfortran"]:
continue
new_args.append(arg)
# This can only be used for incremental rebuilds -- it generates
# an error during clean build of numpy
# if os.path.isfile(output):
# print('SKIPPING: ' + ' '.join(new_args))
# return
print(" ".join(new_args))
if not dryrun:
result = subprocess.run(new_args)
if result.returncode != 0:
sys.exit(result.returncode)
# Emscripten .so files shouldn't have the native platform slug
if library_output:
renamed = output[:-5] + ".so"
for ext in importlib.machinery.EXTENSION_SUFFIXES:
if ext == ".so":
continue
if renamed.endswith(ext):
renamed = renamed[: -len(ext)] + ".so"
break
if not dryrun:
os.rename(output, renamed)
return new_args
|
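handle_command is, at its core, a rewrite rule from a native compiler invocation to an emscripten one: swap the driver, drop host-only flags, and rename object outputs. A heavily reduced, self-contained sketch of that rewriting (the flag lists here are small examples, not the full logic above):

def to_emcc(cmd):
    # Map native drivers to their emscripten counterparts.
    drivers = {'gcc': 'emcc', 'cc': 'emcc', 'c++': 'em++', 'ar': 'emar'}
    dropped = {'-pthread', '-lgfortran', '-lz'}   # example host-only flags
    out = [drivers.get(cmd[0], 'emcc')]
    for arg in cmd[1:]:
        if arg in dropped or arg.startswith('-L/usr'):
            continue
        if arg.endswith('.o'):
            arg = arg[:-2] + '.bc'                # objects become bitcode
        out.append(arg)
    return out

print(to_emcc(['gcc', '-pthread', '-L/usr/lib', 'test.o', '-o', 'test.so']))
# ['emcc', 'test.bc', '-o', 'test.so']
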
31,373 |
def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs_objects = list()
for item in raw_response:
item.update({'collection': collection})
outputs_objects.append(item)
outputs = {CONTEXT_KEY: outputs_objects}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
|
def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs_objects = list()
for item in raw_response:
item.update({'collection': collection})
outputs_objects.append(item)
outputs = {CONTEXT_KEY: outputs_objects}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
|
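Both versions above slice the aggregation result on the client after it has been fetched; the modified version always applies the offset/limit window, whereas the original only did so when the result exceeded the limit. The same pagination can also be pushed into the pipeline itself with $skip and $limit stages; a hedged pymongo sketch, with placeholder connection details:

from pymongo import MongoClient

def paged_aggregate(collection, pipeline, offset=0, limit=50):
    # Let the server do the slicing by appending pagination stages.
    paged = list(pipeline) + [{'$skip': offset}, {'$limit': limit}]
    return list(collection.aggregate(paged))

client = MongoClient('mongodb://localhost:27017')   # placeholder URI
docs = paged_aggregate(client['mydb']['issues'],
                       [{'$match': {'status': 'open'}}], offset=0, limit=10)
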
866 |
def NormalGamma(sym, mu, lamda, alpha, beta):
"""
Creates a bivariate joint random variable with multivariate Normal gamma
distribution.
Examples
========
>>> from sympy.stats import density, NormalGamma
>>> from sympy import symbols
>>> X = NormalGamma('x', 0, 1, 2, 3)
>>> y, z = symbols('y z')
>>> density(X)(y, z)
9*sqrt(2)*z**(3/2)*exp(-3*z)*exp(-y**2*z/2)/(2*sqrt(pi))
Parameters
==========
sym : A symbol/str
For identifying the random variable.
m u: A real number
The mean of the normal distribution
lamda : A positive integer
Parameter of joint distribution
alpha : A positive integer
Parameter of joint distribution
beta : A positive integer
Parameter of joint distribution
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution
"""
return multivariate_rv(NormalGammaDistribution, sym, mu, lamda, alpha, beta)
|
def NormalGamma(sym, mu, lamda, alpha, beta):
"""
Creates a bivariate joint random variable with multivariate Normal gamma
distribution.
Examples
========
>>> from sympy.stats import density, NormalGamma
>>> from sympy import symbols
>>> X = NormalGamma('x', 0, 1, 2, 3)
>>> y, z = symbols('y z')
>>> density(X)(y, z)
9*sqrt(2)*z**(3/2)*exp(-3*z)*exp(-y**2*z/2)/(2*sqrt(pi))
Parameters
==========
sym : A symbol/str
For identifying the random variable.
mu : A real number
The mean of the normal distribution
lamda : A positive integer
Parameter of joint distribution
alpha : A positive integer
Parameter of joint distribution
beta : A positive integer
Parameter of joint distribution
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution
"""
return multivariate_rv(NormalGammaDistribution, sym, mu, lamda, alpha, beta)
|
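For reference, the density printed in the docstring example can be checked against the standard Normal-gamma form. With parameters \mu, \lambda, \alpha, \beta the joint density is

f(y, z) = \frac{\beta^{\alpha}\sqrt{\lambda}}{\Gamma(\alpha)\sqrt{2\pi}}\, z^{\alpha - 1/2} e^{-\beta z} e^{-\lambda z (y - \mu)^2 / 2},

and substituting \mu = 0, \lambda = 1, \alpha = 2, \beta = 3 gives \frac{9}{\sqrt{2\pi}} z^{3/2} e^{-3z} e^{-y^2 z/2}, which is the 9\sqrt{2}\, z^{3/2} e^{-3z} e^{-y^2 z/2} / (2\sqrt{\pi}) returned by density(X)(y, z).
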
41,048 |
def tedpca(
data_cat,
data_oc,
combmode,
mask,
adaptive_mask,
t2sG,
io_generator,
tes,
algorithm="aic",
kdaw=10.0,
rdaw=1.0,
verbose=False,
low_mem=False,
):
"""
Use principal components analysis (PCA) to identify and remove thermal
noise from multi-echo data.
Parameters
----------
data_cat : (S x E x T) array_like
Input functional data
data_oc : (S x T) array_like
Optimally combined time series data
combmode : {'t2s', 'paid'} str
How optimal combination of echos should be made, where 't2s' indicates
using the method of Posse 1999 and 'paid' indicates using the method of
Poser 2006
mask : (S,) array_like
Boolean mask array
adaptive_mask : (S,) array_like
Array where each value indicates the number of echoes with good signal
for that voxel. This mask may be thresholded; for example, with values
less than 3 set to 0.
For more information on thresholding, see `make_adaptive_mask`.
t2sG : (S,) array_like
Map of voxel-wise T2* estimates.
io_generator : :obj:`tedana.io.OutputGenerator`
The output generation object for this workflow
tes : :obj:`list`
List of echo times associated with `data_cat`, in milliseconds
algorithm : {'kundu', 'kundu-stabilize', 'mdl', 'aic', 'kic', float}, optional
Method with which to select components in TEDPCA. PCA
decomposition with the mdl, kic and aic options are based on a Moving Average
(stationary Gaussian) process and are ordered from most to least aggressive
(see Li et al., 2007).
If a float is provided, then it is assumed to represent percentage of variance
explained (0-1) to retain from PCA.
If an int is provide, then it is assumed to be the number of components
to select
Default is 'aic'.
kdaw : :obj:`float`, optional
Dimensionality augmentation weight for Kappa calculations. Must be a
non-negative float, or -1 (a special value). Default is 10.
rdaw : :obj:`float`, optional
Dimensionality augmentation weight for Rho calculations. Must be a
non-negative float, or -1 (a special value). Default is 1.
verbose : :obj:`bool`, optional
Whether to output files from fitmodels_direct or not. Default: False
low_mem : :obj:`bool`, optional
Whether to use incremental PCA (for low-memory systems) or not.
This is only compatible with the "kundu" or "kundu-stabilize" algorithms.
Default: False
Returns
-------
kept_data : (S x T) :obj:`numpy.ndarray`
Dimensionally reduced optimally combined functional data
n_components : :obj:`int`
Number of components retained from PCA decomposition
Notes
-----
====================== =================================================
Notation Meaning
====================== =================================================
:math:`\\kappa` Component pseudo-F statistic for TE-dependent
(BOLD) model.
:math:`\\rho` Component pseudo-F statistic for TE-independent
(artifact) model.
:math:`v` Voxel
:math:`V` Total number of voxels in mask
:math:`\\zeta` Something
:math:`c` Component
:math:`p` Something else
====================== =================================================
Steps:
1. Variance normalize either multi-echo or optimally combined data,
depending on settings.
2. Decompose normalized data using PCA or SVD.
3. Compute :math:`{\\kappa}` and :math:`{\\rho}`:
.. math::
{\\kappa}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \
F_{c,v,R_2^*}}{\\sum {\\zeta}_{c,v}^p}
{\\rho}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \
F_{c,v,S_0}}{\\sum {\\zeta}_{c,v}^p}
4. Some other stuff. Something about elbows.
5. Classify components as thermal noise if they meet both of the
following criteria:
- Nonsignificant :math:`{\\kappa}` and :math:`{\\rho}`.
- Nonsignificant variance explained.
Outputs:
This function writes out several files:
=========================== =============================================
Default Filename Content
=========================== =============================================
desc-PCA_metrics.tsv PCA component table
desc-PCA_metrics.json Metadata sidecar file describing the
component table
desc-PCA_mixing.tsv PCA mixing matrix
desc-PCA_components.nii.gz Component weight maps
desc-PCA_decomposition.json Metadata sidecar file describing the PCA
decomposition
=========================== =============================================
See Also
--------
:func:`tedana.utils.make_adaptive_mask` : The function used to create
the ``adaptive_mask`` parameter.
:py:mod:`tedana.constants` : The module describing the filenames for
various naming conventions
"""
if algorithm == "kundu":
alg_str = "followed by the Kundu component selection decision tree (Kundu et al., 2013)"
RefLGR.info(
"Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192."
)
elif algorithm == "kundu-stabilize":
alg_str = (
"followed by the 'stabilized' Kundu component "
"selection decision tree (Kundu et al., 2013)"
)
RefLGR.info(
"Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192."
)
elif isinstance(algorithm, Number):
if isinstance(algorithm, float):
alg_str = (
"in which the number of components was determined based on a "
"variance explained threshold"
)
else:
alg_str = "in which the number of components is pre-defined"
else:
alg_str = (
"based on the PCA component estimation with a Moving Average"
"(stationary Gaussian) process (Li et al., 2007)"
)
RefLGR.info(
"Li, Y.O., Adalı, T. and Calhoun, V.D., (2007). "
"Estimating the number of independent components for "
"functional magnetic resonance imaging data. "
"Human brain mapping, 28(11), pp.1251-1266."
)
RepLGR.info(
"Principal component analysis {0} was applied to "
"the optimally combined data for dimensionality "
"reduction.".format(alg_str)
)
n_samp, n_echos, n_vols = data_cat.shape
LGR.info(
f"Computing PCA of optimally combined multi-echo data with selection criteria: {algorithm}"
)
data = data_oc[mask, :]
data_z = ((data.T - data.T.mean(axis=0)) / data.T.std(axis=0)).T # var normalize ts
data_z = (data_z - data_z.mean()) / data_z.std() # var normalize everything
if algorithm in ["mdl", "aic", "kic"]:
data_img = io.new_nii_like(io_generator.reference_img, utils.unmask(data, mask))
mask_img = io.new_nii_like(io_generator.reference_img, mask.astype(int))
voxel_comp_weights, varex, varex_norm, comp_ts = ma_pca(
data_img, mask_img, algorithm, normalize=True
)
elif isinstance(algorithm, Number):
ppca = PCA(copy=False, n_components=algorithm, svd_solver="full")
ppca.fit(data_z)
comp_ts = ppca.components_.T
varex = ppca.explained_variance_
voxel_comp_weights = np.dot(np.dot(data_z, comp_ts), np.diag(1.0 / varex))
varex_norm = ppca.explained_variance_ratio_
elif low_mem:
voxel_comp_weights, varex, varex_norm, comp_ts = low_mem_pca(data_z)
else:
ppca = PCA(copy=False, n_components=(n_vols - 1))
ppca.fit(data_z)
comp_ts = ppca.components_.T
varex = ppca.explained_variance_
voxel_comp_weights = np.dot(np.dot(data_z, comp_ts), np.diag(1.0 / varex))
varex_norm = ppca.explained_variance_ratio_
# Compute Kappa and Rho for PCA comps
required_metrics = [
"kappa",
"rho",
"countnoise",
"countsigFT2",
"countsigFS0",
"dice_FT2",
"dice_FS0",
"signal-noise_t",
"variance explained",
"normalized variance explained",
"d_table_score",
]
comptable = metrics.collect.generate_metrics(
data_cat,
data_oc,
comp_ts,
adaptive_mask,
tes,
io_generator,
"PCA",
metrics=required_metrics,
)
# varex_norm from PCA overrides varex_norm from dependence_metrics,
# but we retain the original
comptable["estimated normalized variance explained"] = comptable[
"normalized variance explained"
]
comptable["normalized variance explained"] = varex_norm
# write component maps to 4D image
comp_maps = utils.unmask(computefeats2(data_oc, comp_ts, mask), mask)
io_generator.save_file(comp_maps, "z-scored PCA components img")
# Select components using decision tree
if algorithm == "kundu":
comptable, metric_metadata = kundu_tedpca(
comptable,
n_echos,
kdaw,
rdaw,
stabilize=False,
)
elif algorithm == "kundu-stabilize":
comptable, metric_metadata = kundu_tedpca(
comptable,
n_echos,
kdaw,
rdaw,
stabilize=True,
)
else:
if isinstance(algorithm, float):
alg_str = "variance explained-based"
elif isinstance(algorithm, int):
alg_str = "a fixed number of components and no"
else:
alg_str = algorithm
LGR.info(
f"Selected {comptable.shape[0]} components with {round(100*varex_norm.sum(),2)}% "
f"normalized variance explained using {alg_str} dimensionality detection"
)
comptable["classification"] = "accepted"
comptable["rationale"] = ""
# Save decomposition files
comp_names = [
io.add_decomp_prefix(comp, prefix="pca", max_value=comptable.index.max())
for comp in comptable.index.values
]
mixing_df = pd.DataFrame(data=comp_ts, columns=comp_names)
io_generator.save_file(mixing_df, "PCA mixing tsv")
# Save component table and associated json
io_generator.save_file(comptable, "PCA metrics tsv")
metric_metadata = metrics.collect.get_metadata(comptable)
io_generator.save_file(metric_metadata, "PCA metrics json")
decomp_metadata = {
"Method": (
"Principal components analysis implemented by sklearn. "
"Components are sorted by variance explained in descending order. "
),
}
for comp_name in comp_names:
decomp_metadata[comp_name] = {
"Description": "PCA fit to optimally combined data.",
"Method": "tedana",
}
io_generator.save_file(decomp_metadata, "PCA decomposition json")
acc = comptable[comptable.classification == "accepted"].index.values
n_components = acc.size
voxel_kept_comp_weighted = voxel_comp_weights[:, acc] * varex[None, acc]
kept_data = np.dot(voxel_kept_comp_weighted, comp_ts[:, acc].T)
kept_data = stats.zscore(kept_data, axis=1) # variance normalize time series
kept_data = stats.zscore(kept_data, axis=None) # variance normalize everything
return kept_data, n_components
|
def tedpca(
data_cat,
data_oc,
combmode,
mask,
adaptive_mask,
t2sG,
io_generator,
tes,
algorithm="aic",
kdaw=10.0,
rdaw=1.0,
verbose=False,
low_mem=False,
):
"""
Use principal components analysis (PCA) to identify and remove thermal
noise from multi-echo data.
Parameters
----------
data_cat : (S x E x T) array_like
Input functional data
data_oc : (S x T) array_like
Optimally combined time series data
combmode : {'t2s', 'paid'} str
How optimal combination of echos should be made, where 't2s' indicates
using the method of Posse 1999 and 'paid' indicates using the method of
Poser 2006
mask : (S,) array_like
Boolean mask array
adaptive_mask : (S,) array_like
Array where each value indicates the number of echoes with good signal
for that voxel. This mask may be thresholded; for example, with values
less than 3 set to 0.
For more information on thresholding, see `make_adaptive_mask`.
t2sG : (S,) array_like
Map of voxel-wise T2* estimates.
io_generator : :obj:`tedana.io.OutputGenerator`
The output generation object for this workflow
tes : :obj:`list`
List of echo times associated with `data_cat`, in milliseconds
algorithm : {'kundu', 'kundu-stabilize', 'mdl', 'aic', 'kic', float}, optional
Method with which to select components in TEDPCA. PCA
decomposition with the mdl, kic and aic options are based on a Moving Average
(stationary Gaussian) process and are ordered from most to least aggressive
(see Li et al., 2007).
If a float is provided, then it is assumed to represent percentage of variance
explained (0-1) to retain from PCA.
If an int is provided, then it is assumed to be the number of components
to select
Default is 'aic'.
kdaw : :obj:`float`, optional
Dimensionality augmentation weight for Kappa calculations. Must be a
non-negative float, or -1 (a special value). Default is 10.
rdaw : :obj:`float`, optional
Dimensionality augmentation weight for Rho calculations. Must be a
non-negative float, or -1 (a special value). Default is 1.
verbose : :obj:`bool`, optional
Whether to output files from fitmodels_direct or not. Default: False
low_mem : :obj:`bool`, optional
Whether to use incremental PCA (for low-memory systems) or not.
This is only compatible with the "kundu" or "kundu-stabilize" algorithms.
Default: False
Returns
-------
kept_data : (S x T) :obj:`numpy.ndarray`
Dimensionally reduced optimally combined functional data
n_components : :obj:`int`
Number of components retained from PCA decomposition
Notes
-----
====================== =================================================
Notation Meaning
====================== =================================================
:math:`\\kappa` Component pseudo-F statistic for TE-dependent
(BOLD) model.
:math:`\\rho` Component pseudo-F statistic for TE-independent
(artifact) model.
:math:`v` Voxel
:math:`V` Total number of voxels in mask
:math:`\\zeta` Something
:math:`c` Component
:math:`p` Something else
====================== =================================================
Steps:
1. Variance normalize either multi-echo or optimally combined data,
depending on settings.
2. Decompose normalized data using PCA or SVD.
3. Compute :math:`{\\kappa}` and :math:`{\\rho}`:
.. math::
{\\kappa}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \
F_{c,v,R_2^*}}{\\sum {\\zeta}_{c,v}^p}
{\\rho}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \
F_{c,v,S_0}}{\\sum {\\zeta}_{c,v}^p}
4. Some other stuff. Something about elbows.
5. Classify components as thermal noise if they meet both of the
following criteria:
- Nonsignificant :math:`{\\kappa}` and :math:`{\\rho}`.
- Nonsignificant variance explained.
Outputs:
This function writes out several files:
=========================== =============================================
Default Filename Content
=========================== =============================================
desc-PCA_metrics.tsv PCA component table
desc-PCA_metrics.json Metadata sidecar file describing the
component table
desc-PCA_mixing.tsv PCA mixing matrix
desc-PCA_components.nii.gz Component weight maps
desc-PCA_decomposition.json Metadata sidecar file describing the PCA
decomposition
=========================== =============================================
See Also
--------
:func:`tedana.utils.make_adaptive_mask` : The function used to create
the ``adaptive_mask`` parameter.
:py:mod:`tedana.constants` : The module describing the filenames for
various naming conventions
"""
if algorithm == "kundu":
alg_str = "followed by the Kundu component selection decision tree (Kundu et al., 2013)"
RefLGR.info(
"Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192."
)
elif algorithm == "kundu-stabilize":
alg_str = (
"followed by the 'stabilized' Kundu component "
"selection decision tree (Kundu et al., 2013)"
)
RefLGR.info(
"Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192."
)
elif isinstance(algorithm, Number):
if isinstance(algorithm, float):
alg_str = (
"in which the number of components was determined based on a "
"variance explained threshold"
)
else:
alg_str = "in which the number of components is pre-defined"
else:
alg_str = (
"based on the PCA component estimation with a Moving Average"
"(stationary Gaussian) process (Li et al., 2007)"
)
RefLGR.info(
"Li, Y.O., Adalı, T. and Calhoun, V.D., (2007). "
"Estimating the number of independent components for "
"functional magnetic resonance imaging data. "
"Human brain mapping, 28(11), pp.1251-1266."
)
RepLGR.info(
"Principal component analysis {0} was applied to "
"the optimally combined data for dimensionality "
"reduction.".format(alg_str)
)
n_samp, n_echos, n_vols = data_cat.shape
LGR.info(
f"Computing PCA of optimally combined multi-echo data with selection criteria: {algorithm}"
)
data = data_oc[mask, :]
data_z = ((data.T - data.T.mean(axis=0)) / data.T.std(axis=0)).T # var normalize ts
data_z = (data_z - data_z.mean()) / data_z.std() # var normalize everything
if algorithm in ["mdl", "aic", "kic"]:
data_img = io.new_nii_like(io_generator.reference_img, utils.unmask(data, mask))
mask_img = io.new_nii_like(io_generator.reference_img, mask.astype(int))
voxel_comp_weights, varex, varex_norm, comp_ts = ma_pca(
data_img, mask_img, algorithm, normalize=True
)
elif isinstance(algorithm, Number):
ppca = PCA(copy=False, n_components=algorithm, svd_solver="full")
ppca.fit(data_z)
comp_ts = ppca.components_.T
varex = ppca.explained_variance_
voxel_comp_weights = np.dot(np.dot(data_z, comp_ts), np.diag(1.0 / varex))
varex_norm = ppca.explained_variance_ratio_
elif low_mem:
voxel_comp_weights, varex, varex_norm, comp_ts = low_mem_pca(data_z)
else:
ppca = PCA(copy=False, n_components=(n_vols - 1))
ppca.fit(data_z)
comp_ts = ppca.components_.T
varex = ppca.explained_variance_
voxel_comp_weights = np.dot(np.dot(data_z, comp_ts), np.diag(1.0 / varex))
varex_norm = ppca.explained_variance_ratio_
# Compute Kappa and Rho for PCA comps
required_metrics = [
"kappa",
"rho",
"countnoise",
"countsigFT2",
"countsigFS0",
"dice_FT2",
"dice_FS0",
"signal-noise_t",
"variance explained",
"normalized variance explained",
"d_table_score",
]
comptable = metrics.collect.generate_metrics(
data_cat,
data_oc,
comp_ts,
adaptive_mask,
tes,
io_generator,
"PCA",
metrics=required_metrics,
)
# varex_norm from PCA overrides varex_norm from dependence_metrics,
# but we retain the original
comptable["estimated normalized variance explained"] = comptable[
"normalized variance explained"
]
comptable["normalized variance explained"] = varex_norm
# write component maps to 4D image
comp_maps = utils.unmask(computefeats2(data_oc, comp_ts, mask), mask)
io_generator.save_file(comp_maps, "z-scored PCA components img")
# Select components using decision tree
if algorithm == "kundu":
comptable, metric_metadata = kundu_tedpca(
comptable,
n_echos,
kdaw,
rdaw,
stabilize=False,
)
elif algorithm == "kundu-stabilize":
comptable, metric_metadata = kundu_tedpca(
comptable,
n_echos,
kdaw,
rdaw,
stabilize=True,
)
else:
if isinstance(algorithm, float):
alg_str = "variance explained-based"
elif isinstance(algorithm, int):
alg_str = "a fixed number of components and no"
else:
alg_str = algorithm
LGR.info(
f"Selected {comptable.shape[0]} components with {round(100*varex_norm.sum(),2)}% "
f"normalized variance explained using {alg_str} dimensionality detection"
)
comptable["classification"] = "accepted"
comptable["rationale"] = ""
# Save decomposition files
comp_names = [
io.add_decomp_prefix(comp, prefix="pca", max_value=comptable.index.max())
for comp in comptable.index.values
]
mixing_df = pd.DataFrame(data=comp_ts, columns=comp_names)
io_generator.save_file(mixing_df, "PCA mixing tsv")
# Save component table and associated json
io_generator.save_file(comptable, "PCA metrics tsv")
metric_metadata = metrics.collect.get_metadata(comptable)
io_generator.save_file(metric_metadata, "PCA metrics json")
decomp_metadata = {
"Method": (
"Principal components analysis implemented by sklearn. "
"Components are sorted by variance explained in descending order. "
),
}
for comp_name in comp_names:
decomp_metadata[comp_name] = {
"Description": "PCA fit to optimally combined data.",
"Method": "tedana",
}
io_generator.save_file(decomp_metadata, "PCA decomposition json")
acc = comptable[comptable.classification == "accepted"].index.values
n_components = acc.size
voxel_kept_comp_weighted = voxel_comp_weights[:, acc] * varex[None, acc]
kept_data = np.dot(voxel_kept_comp_weighted, comp_ts[:, acc].T)
kept_data = stats.zscore(kept_data, axis=1) # variance normalize time series
kept_data = stats.zscore(kept_data, axis=None) # variance normalize everything
return kept_data, n_components
|
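The kappa and rho metrics described in the docstring are weighted averages of per-voxel F-statistics, with the component weights raised to a power as the weighting. A small numpy sketch of just that formula (the exponent p, the array names, and the random stand-in data are illustrative; the actual metric computation lives in tedana.metrics):

import numpy as np

def weighted_pseudo_f(weights, f_stats, p=2):
    # kappa_c / rho_c style metric: sum_v w_v^p * F_v / sum_v w_v^p
    w = np.abs(weights) ** p
    return (w * f_stats).sum() / w.sum()

rng = np.random.default_rng(0)
weights = rng.standard_normal(1000)   # component map values over voxels
f_r2 = rng.chisquare(3, 1000)         # stand-in for per-voxel F_{R2*}
f_s0 = rng.chisquare(3, 1000)         # stand-in for per-voxel F_{S0}
print(weighted_pseudo_f(weights, f_r2), weighted_pseudo_f(weights, f_s0))
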
33,115 |
def rk4(f, x, t, dt, order=4):
"""Runge-Kutta (explicit, non-adaptive) numerical ODE solvers.
Parameters
----------
f : function
        The forcing of the ODE must be a function of the form f(t, x)
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Time step interval of the ODE solver
order : int, optional
The order of RK method. Default: 4
Returns
-------
ndarray
State vector at the new time step t+dt
"""
if order >=1: k1 = dt * f(t , x) # noqa
if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa
if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa
if order ==4: # noqa
k3 = dt * f(t+dt/2, x+k2/2) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if order ==1: return x + k1 # noqa
elif order ==2: return x + k2 # noqa
elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa
elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
def rk4(f, x, t, dt, order=4):
"""Runge-Kutta (explicit, non-adaptive) numerical ODE solvers.
Parameters
----------
f : function
        The forcing of the ODE must be a function of the form f(t, x)
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Time step interval of the ODE solver
order : int, optional
The order of RK method. Default: 4
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if order >=1: k1 = dt * f(t , x) # noqa
if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa
if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa
if order ==4: # noqa
k3 = dt * f(t+dt/2, x+k2/2) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if order ==1: return x + k1 # noqa
elif order ==2: return x + k2 # noqa
elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa
elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
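A quick self-contained check of the classical fourth-order step above on dx/dt = -x, whose exact solution is exp(-t). The step is restated compactly here so the snippet runs on its own:

import numpy as np

def rk4_step(f, x, t, dt):
    # Classical 4th-order Runge-Kutta step (the order=4 branch above).
    k1 = dt * f(t, x)
    k2 = dt * f(t + dt / 2, x + k1 / 2)
    k3 = dt * f(t + dt / 2, x + k2 / 2)
    k4 = dt * f(t + dt, x + k3)
    return x + (k1 + 2 * (k2 + k3) + k4) / 6

f = lambda t, x: -x              # dx/dt = -x
x, t, dt = 1.0, 0.0, 0.1
for _ in range(10):
    x = rk4_step(f, x, t, dt)
    t += dt
print(x, np.exp(-1.0))           # both approximately 0.36788
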
4,184 |
def read_dig_montage_brainvision(fname):
r"""Read subject-specific digitization montage from a brainvision file.
Parameters
----------
bvct : path-like
BrainVision CapTrak coordinates file from which to read digitization
locations. This is typically in XML format. If str (filename), all
other arguments are ignored.
Returns
-------
montage : instance of DigMontage
The digitizer montage.
See Also
--------
DigMontage
Montage
read_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname)
# XXX: to change to the new naming in v.0.20 (all this block should go)
data.pop('point_names')
data['hpi_dev'] = data['hpi']
data['hpi'] = data.pop('elp')
data['ch_pos'] = data.pop('dig_ch_pos')
return make_dig_montage(
**data,
transform_to_head=False,
compute_dev_head_t=False,
)
|
def read_dig_montage_brainvision(fname):
r"""Read subject-specific digitization montage from a brainvision file.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read digitization
locations. This is typically in XML format. If str (filename), all
other arguments are ignored.
Returns
-------
montage : instance of DigMontage
The digitizer montage.
See Also
--------
DigMontage
Montage
read_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname)
# XXX: to change to the new naming in v.0.20 (all this block should go)
data.pop('point_names')
data['hpi_dev'] = data['hpi']
data['hpi'] = data.pop('elp')
data['ch_pos'] = data.pop('dig_ch_pos')
return make_dig_montage(
**data,
transform_to_head=False,
compute_dev_head_t=False,
)
|
35,144 |
def predict_labels_aot(session, aot_executor, input_data, runs_per_sample=1):
"""Predicts labels for each sample in input_data using host-driven AOT.
Returns an iterator of (label, runtime) tuples. This function can only
be used with models for which the output is the confidence for each class."""
assert aot_executor.get_num_inputs() == 1
assert aot_executor.get_num_outputs() == 1
assert runs_per_sample > 0
for counter, sample in enumerate(input_data):
logging.info("Evaluating sample %d", counter)
aot_executor.get_input(0).copyfrom(sample)
result = aot_executor.module.time_evaluator("run", session.device, number=runs_per_sample)()
predicted_label = aot_executor.get_output(0).numpy().argmax()
runtime = result.mean
yield predicted_label, runtime
|
def predict_labels_aot(session, aot_executor, input_data, runs_per_sample=1):
"""Predicts labels for each sample in input_data using host-driven AOT.
Returns an iterator of (label, runtime) tuples. This function can only
be used with models for which the output is the confidence for each class."""
assert aot_executor.get_num_inputs() == 1
assert aot_executor.get_num_outputs() == 1
assert runs_per_sample > 0
for counter, sample in enumerate(input_data):
logging.info(f"Evaluating sample {counter}")
aot_executor.get_input(0).copyfrom(sample)
result = aot_executor.module.time_evaluator("run", session.device, number=runs_per_sample)()
predicted_label = aot_executor.get_output(0).numpy().argmax()
runtime = result.mean
yield predicted_label, runtime
|
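The function yields (predicted_label, runtime) pairs, so a caller typically folds the stream into accuracy and timing statistics. A hedged sketch of that consumption pattern; the session/aot_executor objects come from TVM's microTVM tooling and are not constructed here, so a hard-coded stream stands in for the generator's output.

import statistics

def summarize(predictions, true_labels):
    """Fold the (predicted_label, runtime) stream into accuracy and mean runtime."""
    correct, runtimes = 0, []
    for (predicted, runtime), truth in zip(predictions, true_labels):
        correct += int(predicted == truth)
        runtimes.append(runtime)
    return correct / len(runtimes), statistics.mean(runtimes)

# Illustrative stand-in for predict_labels_aot(...) output:
stream = [(3, 0.012), (7, 0.011), (3, 0.013)]
print(summarize(iter(stream), [3, 7, 1]))  # (0.666..., 0.012)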
58,547 |
def deployment(
_func_or_class: Optional[Callable] = None,
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
route_prefix: Optional[str] = None,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
) -> Callable[[Callable], Deployment]:
"""Define a Serve deployment.
Args:
name (Optional[str]): Globally-unique name identifying this deployment.
If not provided, the name of the class or function will be used.
version (Optional[str]): Version of the deployment. This is used to
indicate a code change for the deployment; when it is re-deployed
with a version change, a rolling update of the replicas will be
performed. If not provided, every deployment will be treated as a
new version.
prev_version (Optional[str]): Version of the existing deployment which
is used as a precondition for the next deployment. If prev_version
does not match with the existing deployment's version, the
deployment will fail. If not provided, deployment procedure will
not check the existing deployment's version.
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this backend. Defaults to 1.
init_args (Optional[Tuple]): Arguments to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
route_prefix (Optional[str]): Requests to paths under this HTTP path
prefix will be routed to this deployment. Defaults to '/{name}'.
Routing is done based on longest-prefix match, so if you have
deployment A with a prefix of '/a' and deployment B with a prefix
of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests
to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with
a '/' unless they're the root (just '/'), which acts as a
catch-all.
ray_actor_options (dict): Options to be passed to the Ray actor
constructor such as resource requirements.
user_config (Optional[Any]): [experimental] Config to pass to the
reconfigure method of the backend. This can be updated dynamically
without changing the version of the deployment and restarting its
replicas. The user_config needs to be hashable to keep track of
updates, so it must only contain hashable types, lists, and
dictionaries.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this backend without receiving a
response. Defaults to 100.
Example:
>>> @serve.deployment(name="deployment1", version="v1")
class MyDeployment:
pass
>>> MyDeployment.deploy(*init_args)
>>> MyDeployment.options(num_replicas=2, init_args=init_args).deploy()
Returns:
Deployment
"""
config = BackendConfig()
if num_replicas is not None:
config.num_replicas = num_replicas
if user_config is not None:
config.user_config = user_config
if max_concurrent_queries is not None:
config.max_concurrent_queries = max_concurrent_queries
def decorator(_func_or_class):
return Deployment(
_func_or_class,
name if name is not None else _func_or_class.__name__,
config,
version=version,
prev_version=prev_version,
init_args=init_args,
route_prefix=route_prefix,
ray_actor_options=ray_actor_options,
_internal=True,
)
# This handles both parametrized and non-parametrized usage of the
# decorator. See the @serve.batch code for more details.
return decorator(_func_or_class) if callable(_func_or_class) else decorator
|
def deployment(
_func_or_class: Optional[Callable] = None,
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
route_prefix: Optional[str] = None,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
) -> Callable[[Callable], Deployment]:
"""Define a Serve deployment.
Args:
name (Optional[str]): Globally-unique name identifying this deployment.
If not provided, the name of the class or function will be used.
version (Optional[str]): Version of the deployment. This is used to
indicate a code change for the deployment; when it is re-deployed
with a version change, a rolling update of the replicas will be
performed. If not provided, every deployment will be treated as a
new version.
prev_version (Optional[str]): Version of the existing deployment which
is used as a precondition for the next deployment. If prev_version
does not match with the existing deployment's version, the
deployment will fail. If not provided, deployment procedure will
not check the existing deployment's version.
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this backend. Defaults to 1.
init_args (Optional[Tuple]): Arguments to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
route_prefix (Optional[str]): Requests to paths under this HTTP path
prefix will be routed to this deployment. Defaults to '/{name}'.
Routing is done based on longest-prefix match, so if you have
deployment A with a prefix of '/a' and deployment B with a prefix
of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests
to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with
a '/' unless they're the root (just '/'), which acts as a
catch-all.
ray_actor_options (dict): Options to be passed to the Ray actor
constructor such as resource requirements.
user_config (Optional[Any]): [experimental] Config to pass to the
reconfigure method of the backend. This can be updated dynamically
without changing the version of the deployment and restarting its
replicas. The user_config needs to be hashable to keep track of
updates, so it must only contain hashable types, or hashable
types nested in list or dictionary.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this backend without receiving a
response. Defaults to 100.
Example:
>>> @serve.deployment(name="deployment1", version="v1")
class MyDeployment:
pass
>>> MyDeployment.deploy(*init_args)
>>> MyDeployment.options(num_replicas=2, init_args=init_args).deploy()
Returns:
Deployment
"""
config = BackendConfig()
if num_replicas is not None:
config.num_replicas = num_replicas
if user_config is not None:
config.user_config = user_config
if max_concurrent_queries is not None:
config.max_concurrent_queries = max_concurrent_queries
def decorator(_func_or_class):
return Deployment(
_func_or_class,
name if name is not None else _func_or_class.__name__,
config,
version=version,
prev_version=prev_version,
init_args=init_args,
route_prefix=route_prefix,
ray_actor_options=ray_actor_options,
_internal=True,
)
# This handles both parametrized and non-parametrized usage of the
# decorator. See the @serve.batch code for more details.
return decorator(_func_or_class) if callable(_func_or_class) else decorator
|
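The final `return decorator(_func_or_class) if callable(_func_or_class) else decorator` line is the standard trick for a decorator that works both bare and parametrized. A minimal standalone sketch of that pattern; the names are illustrative, not Serve API.

from typing import Callable, Optional

def tag(_func: Optional[Callable] = None, label: str = "default"):
    """Usable both as @tag and as @tag(label=...)."""
    def decorator(func):
        func.label = label
        return func
    # Bare usage passes the decorated function itself as _func.
    return decorator(_func) if callable(_func) else decorator

@tag
def a(): ...

@tag(label="custom")
def b(): ...

print(a.label, b.label)  # default custom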
46,206 |
def guess_rgb(shape):
"""Guess if the passed shape comes from rgb data.
If last dim is 3 or 4 assume the data is rgb, including rgba.
Parameters
----------
shape : list of int
Shape of the data that should be checked.
Returns
-------
bool
If data is rgb or not.
"""
ndim = len(shape)
last_dim = shape[-1]
if ndim > 2 and last_dim < 5:
return True
else:
return False
|
def guess_rgb(shape):
"""Guess if the passed shape comes from rgb data.
If last dim is 3 or 4 assume the data is rgb, including rgba.
Parameters
----------
shape : list of int
Shape of the data that should be checked.
Returns
-------
bool
If data is rgb or not.
"""
ndim = len(shape)
last_dim = shape[-1]
return ndim > 2 and last_dim < 5
|
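A quick check of the heuristic on a few shapes, assuming the guess_rgb above is in scope; any array with more than two dimensions and a trailing dimension smaller than 5 is treated as RGB(A).

for shape in [(512, 512, 3), (512, 512, 4), (512, 512), (60, 80, 7)]:
    print(shape, guess_rgb(shape))
# (512, 512, 3) True
# (512, 512, 4) True
# (512, 512) False
# (60, 80, 7) False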
32,942 |
def redact_url(url, query_string_obfuscation_pattern):
# type: (str, re.Pattern) -> str
hqs, fs, f = url.partition("#")
h, qss, qs = hqs.partition("?")
redacted_query = re.sub(query_string_obfuscation_pattern, "<redacted>", compat.to_unicode(qs))
return h + qss + redacted_query + fs + f
|
def redact_url(url, query_string_obfuscation_pattern):
# type: (str, re.Pattern) -> str
hqs, fs, f = url.partition("#")
h, qss, qs = hqs.partition("?")
redacted_query = query_string_obfuscation_pattern.sub("<redacted>", compat.to_unicode(qs))
return h + qss + redacted_query + fs + f
|
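A self-contained sketch of the compiled-pattern version in isolation; a plain str stands in for compat.to_unicode, which is not shown here, and the pattern is an illustrative example.

import re

def redact_url_demo(url, query_string_obfuscation_pattern):
    hqs, fs, f = url.partition("#")
    h, qss, qs = hqs.partition("?")
    redacted_query = query_string_obfuscation_pattern.sub("<redacted>", qs)
    return h + qss + redacted_query + fs + f

pattern = re.compile(r"(?i)(password|token)=[^&]+")
print(redact_url_demo("https://example.com/login?user=bob&token=abc123#top", pattern))
# https://example.com/login?user=bob&<redacted>#top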
20,031 |
def read_data(filename):
"""Read hyperspectral image data from file.
Inputs:
filename = Name of image file
Returns:
spectral_array = Hyperspectral data instance
:param filename: str
:return spectral_array: __main__.Spectral_data
"""
# Initialize dictionary
header_dict = {}
# Remove any file extension and set .hdr filename
filename_base = os.path.splitext(filename)[0]
headername = filename_base + ".hdr"
with open(headername, "r") as f:
# Replace characters for easier parsing
hdata = f.read()
hdata = hdata.replace(",\n", ",")
hdata = hdata.replace("\n,", ",")
hdata = hdata.replace("{\n", "{")
hdata = hdata.replace("\n}", "}")
hdata = hdata.replace(" \n ", "")
hdata = hdata.replace(";", "")
hdata = hdata.split("\n")
# Loop through and create a dictionary from the header file
for i, string in enumerate(hdata):
if ' = ' in string:
header_data = string.split(" = ")
header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})
elif ' : ' in string:
header_data = string.split(" : ")
header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})
# Reformat wavelengths
header_dict["wavelength"] = header_dict["wavelength"].replace("{", "")
header_dict["wavelength"] = header_dict["wavelength"].replace("}", "")
header_dict["wavelength"] = header_dict["wavelength"].replace(" ", "")
header_dict["wavelength"] = header_dict["wavelength"].split(",")
# Create dictionary of wavelengths
wavelength_dict = {}
for j, wavelength in enumerate(header_dict["wavelength"]):
wavelength_dict.update({float(wavelength): float(j)})
# Replace datatype ID number with the numpy datatype
dtype_dict = {"1": np.uint8, "2": np.int16, "3": np.int32, "4": np.float32, "5": np.float64, "6": np.complex64,
"9": np.complex128, "12": np.uint16, "13": np.uint32, "14": np.uint64, "15": np.uint64}
header_dict["data type"] = dtype_dict[header_dict["data type"]]
# Read in the data from the file
raw_data = np.fromfile(filename, header_dict["data type"], -1)
# Reshape the raw data into a datacube array
array_data = raw_data.reshape(int(header_dict["lines"]),
int(header_dict["bands"]),
int(header_dict["samples"])).transpose((0, 2, 1))
# Check for default bands (that get used to make pseudo_rgb image)
default_bands = None
if "default bands" in header_dict:
header_dict["default bands"] = header_dict["default bands"].replace("{", "")
header_dict["default bands"] = header_dict["default bands"].replace("}", "")
default_bands = header_dict["default bands"].split(",")
# Find array min and max values
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
wavelength_units = header_dict.get("wavelength units")
if wavelength_units is None:
wavelength_units = "nm"
# Create an instance of the spectral_data class
spectral_array = Spectral_data(array_data=array_data,
max_wavelength=float(str(header_dict["wavelength"][-1]).rstrip()),
min_wavelength=float(str(header_dict["wavelength"][0]).rstrip()),
max_value=max_pixel, min_value=min_pixel,
d_type=header_dict["data type"],
wavelength_dict=wavelength_dict, samples=int(header_dict["samples"]),
lines=int(header_dict["lines"]), interleave=header_dict["interleave"],
wavelength_units=wavelength_units, array_type="datacube",
pseudo_rgb=None, filename=filename, default_bands=default_bands)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(spectral_array)
spectral_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device)+"_pseudo_rgb.png"))
return spectral_array
|
def read_data(filename):
"""Read hyperspectral image data from file.
Inputs:
filename = Name of image file
Returns:
spectral_array = Hyperspectral data instance
:param filename: str
:return spectral_array: __main__.Spectral_data
"""
# Initialize dictionary
header_dict = {}
# Remove any file extension and set .hdr filename
filename_base = os.path.splitext(filename)[0]
headername = filename_base + ".hdr"
with open(headername, "r") as f:
# Replace characters for easier parsing
hdata = f.read()
hdata = hdata.replace(",\n", ",")
hdata = hdata.replace("\n,", ",")
hdata = hdata.replace("{\n", "{")
hdata = hdata.replace("\n}", "}")
hdata = hdata.replace(" \n ", "")
hdata = hdata.replace(";", "")
hdata = hdata.split("\n")
# Loop through and create a dictionary from the header file
for i, string in enumerate(hdata):
if ' = ' in string:
header_data = string.split(" = ")
header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})
elif ' : ' in string:
header_data = string.split(" : ")
header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})
# Reformat wavelengths
header_dict["wavelength"] = header_dict["wavelength"].replace("{", "")
header_dict["wavelength"] = header_dict["wavelength"].replace("}", "")
header_dict["wavelength"] = header_dict["wavelength"].replace(" ", "")
header_dict["wavelength"] = header_dict["wavelength"].split(",")
# Create dictionary of wavelengths
wavelength_dict = {}
for j, wavelength in enumerate(header_dict["wavelength"]):
wavelength_dict.update({float(wavelength): float(j)})
# Replace datatype ID number with the numpy datatype
dtype_dict = {"1": np.uint8, "2": np.int16, "3": np.int32, "4": np.float32, "5": np.float64, "6": np.complex64,
"9": np.complex128, "12": np.uint16, "13": np.uint32, "14": np.uint64, "15": np.uint64}
header_dict["data type"] = dtype_dict[header_dict["data type"]]
# Read in the data from the file
raw_data = np.fromfile(filename, header_dict["data type"], -1)
# Reshape the raw data into a datacube array
array_data = raw_data.reshape(int(header_dict["lines"]),
int(header_dict["bands"]),
int(header_dict["samples"])).transpose((0, 2, 1))
# Check for default bands (that get used to make pseudo_rgb image)
default_bands = None
if "default bands" in header_dict:
header_dict["default bands"] = header_dict["default bands"].replace("{", "")
header_dict["default bands"] = header_dict["default bands"].replace("}", "")
default_bands = header_dict["default bands"].split(",")
# Find array min and max values
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
wavelength_units = header_dict.get("wavelength units")
if wavelength_units is None:
wavelength_units = "nm"
# Create an instance of the spectral_data class
spectral_array = Spectral_data(array_data=array_data,
max_wavelength=float(str(header_dict["wavelength"][-1]).rstrip()),
min_wavelength=float(str(header_dict["wavelength"][0]).rstrip()),
max_value=max_pixel, min_value=min_pixel,
d_type=header_dict["data type"],
wavelength_dict=wavelength_dict, samples=int(header_dict["samples"]),
lines=int(header_dict["lines"]), interleave=header_dict["interleave"],
wavelength_units=wavelength_units, array_type="datacube",
pseudo_rgb=None, filename=filename, default_bands=default_bands)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(spectral_array)
spectral_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_pseudo_rgb.png"))
return spectral_array
|
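The header parsing above boils down to splitting "key = value" lines into a dictionary before the wavelength and datatype fields are post-processed; a minimal sketch of that step on a made-up ENVI-style header string.

hdata = "samples = 640\nlines = 480\nbands = 224\ninterleave = bil".split("\n")
header_dict = {}
for string in hdata:
    if " = " in string:
        key, value = string.split(" = ")
        header_dict[key.rstrip()] = value.rstrip()
print(header_dict)
# {'samples': '640', 'lines': '480', 'bands': '224', 'interleave': 'bil'}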
16,244 |
def _setup_entities(
hass: HomeAssistant, entry: ConfigEntry, device_ids: list
) -> list[Entity]:
"""Set up Tuya Cover."""
device_manager = hass.data[DOMAIN][entry.entry_id][TUYA_DEVICE_MANAGER]
entities: list[Entity] = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
entities.append(TuyaHaCover(device, device_manager))
hass.data[DOMAIN][entry.entry_id][TUYA_HA_DEVICES].add(device_id)
return entities
|
def _setup_entities(
hass: HomeAssistant, entry: ConfigEntry, device_ids: list[str]
) -> list[Entity]:
"""Set up Tuya Cover."""
device_manager = hass.data[DOMAIN][entry.entry_id][TUYA_DEVICE_MANAGER]
entities: list[Entity] = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
entities.append(TuyaHaCover(device, device_manager))
hass.data[DOMAIN][entry.entry_id][TUYA_HA_DEVICES].add(device_id)
return entities
|
39,297 |
def ncells_from_cells_py36(cells):
"""Get the number of cells from a VTK cell connectivity array.
Works on on all Python>=3.5
"""
c = 0
n_cells = 0
while c < cells.size:
c += cells[c] + 1
n_cells += 1
return n_cells
|
def ncells_from_cells_py36(cells):
"""Get the number of cells from a VTK cell connectivity array.
Works on all Python>=3.5
"""
c = 0
n_cells = 0
while c < cells.size:
c += cells[c] + 1
n_cells += 1
return n_cells
|
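The loop walks a VTK legacy connectivity array laid out as [n, id1..idn, n, id1..idn, ...]; a small worked example, assuming the function above is in scope.

import numpy as np

# Two cells: a triangle (3 point ids) followed by a quad (4 point ids).
cells = np.array([3, 0, 1, 2, 4, 1, 2, 3, 4])
print(ncells_from_cells_py36(cells))  # 2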
59,946 |
def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
temporary_registry = None
new_checks = []
for t in testcases:
if not t.check.is_fixture():
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
# make sure that it will not be overwritten by a parent

# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
if temporary_registry is None:
temporary_registry = TestRegistry.create(nc, variant_num=i)
else:
temporary_registry.add(nc, variant_num=i)
if temporary_registry:
new_checks = temporary_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
|
def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
temporary_registry = None
new_checks = []
for t in testcases:
if t.check.is_fixture():
continue
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
# make sure that it will not be overwritten by a parent
# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
if temporary_registry is None:
temporary_registry = TestRegistry.create(nc, variant_num=i)
else:
temporary_registry.add(nc, variant_num=i)
if temporary_registry:
new_checks = temporary_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
|
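Stripped of the ReFrame specifics (make_test, builtins.parameter, the pipeline hooks), the distribution step above amounts to deriving one subclass per (partition, environment) pair at runtime. A generic sketch of that mechanism with plain type(); all names are illustrative.

class BaseCheck:
    valid_systems = ['*']
    valid_prog_environs = ['*']

def make_distributed(base, partition, environ, nodes):
    """Derive a per-(partition, environ) variant of `base` at runtime."""
    attrs = {
        'valid_systems': [partition],
        'valid_prog_environs': [environ],
        'pinned_nodes': list(nodes),
    }
    return type(f'__D_{partition}_{environ}_{base.__name__}', (base,), attrs)

Distributed = make_distributed(BaseCheck, 'cluster:gpu', 'gnu', ['nid001', 'nid002'])
print(Distributed.__name__, Distributed.valid_systems, Distributed.pinned_nodes)
# __D_cluster:gpu_gnu_BaseCheck ['cluster:gpu'] ['nid001', 'nid002']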
25,184 |
def test_property_docstring() -> None:
code = """
class A:
@property
def test(self):
'''Docstring'''
return 42
A.test #@
"""
node = extract_node(code)
inferred = next(node.infer())
assert isinstance(inferred, objects.Property)
assert inferred.doc_node
assert inferred.doc_node.value == "Docstring"
with pytest.warns(DeprecationWarning) as records:
assert inferred.doc == "Docstring"
assert len(records) == 1
|
def test_property_docstring() -> None:
code = """
class A:
@property
def test(self):
'''Docstring'''
return 42
A.test #@
"""
node = extract_node(code)
inferred = next(node.infer())
assert isinstance(inferred, objects.Property)
assert isinstance(inferred.doc_node, nodes.Const)
assert inferred.doc_node.value == "Docstring"
with pytest.warns(DeprecationWarning) as records:
assert inferred.doc == "Docstring"
assert len(records) == 1
|