id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
30,153 |
def validate_consumption(obj, zone_key):
# Data quality check
if obj['consumption'] is not None and obj['consumption'] < 0:
raise ValidationError('%s: consumption has negative value '
'%s' % (zone_key, obj['consumption']))
# Plausibility Check, no more than 500GW
if obj['consumption'] is not None and abs(obj['consumption']) > 500000:
raise ValidationError('%s: consumption is not realistic (>500GW) '
'%s' % (zone_key, obj['consumption']))
validate_reasonable_time(obj, zone_key)
|
def validate_consumption(obj, zone_key):
# Data quality check
if obj['consumption'] is not None and obj['consumption'] < 0:
raise ValidationError('%s: consumption has negative value '
'%s' % (zone_key, obj['consumption']))
# Plausibility Check, no more than 500GW
if abs(obj.get('consumption', 0)) > 500000:
raise ValidationError('%s: consumption is not realistic (>500GW) '
'%s' % (zone_key, obj['consumption']))
validate_reasonable_time(obj, zone_key)
|
39,651 |
def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar):
"""
Returns the output metadata array
for the model function (predict, transform etc)
by running the appropriate function on dummy data
of shape (1, n_features)
Parameters
----------
model_fun: Model function
_predict, _transform etc
estimator : Estimator
The underlying estimator that is fit.
input_dask_ar: The input dask_array
Returns
-------
metadata: metadata of output dask array
"""
# sklearn fails if input array has size size
# It requires at least 1 sample to run successfully
imput_meta = input_dask_ar._meta
if hasattr(imput_meta, "__array_function__"):
ar = np.zeros(
shape=(1, input_dask_ar.shape[1]),
dtype=input_dask_ar.dtype,
like=imput_meta,
)
elif "scipy.sparse" in type(imput_meta).__module__:
# sparse matrices dont support
# `like` due to non implimented __array_function__
# Refer https:/q/github.com/scipy/scipy/issues/10362
# Note below works for both cupy and scipy sparse matrices
ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
else:
msg = (
"\nYou did not provide metadata, so Dask is running the"
"function on a small dataset to guess output types. "
"It is possible that Dask will guess incorrectly.\n"
"To provide an explicit output types or to silence this message, "
"please provide the `predict_meta`, `predict_proba_meta`,"
"`transform_meta` as appropiate"
)
warnings.warn(msg)
ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
return model_fn(ar, estimator)
|
def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar):
"""
Returns the output metadata array
for the model function (predict, transform etc)
by running the appropriate function on dummy data
of shape (1, n_features)
Parameters
----------
model_fun: Model function
_predict, _transform etc
estimator : Estimator
The underlying estimator that is fit.
input_dask_ar: The input dask_array
Returns
-------
metadata: metadata of output dask array
"""
# sklearn fails if input array has size size
# It requires at least 1 sample to run successfully
input_meta = input_dask_ar._meta
if hasattr(imput_meta, "__array_function__"):
ar = np.zeros(
shape=(1, input_dask_ar.shape[1]),
dtype=input_dask_ar.dtype,
like=imput_meta,
)
elif "scipy.sparse" in type(imput_meta).__module__:
# sparse matrices dont support
# `like` due to non implimented __array_function__
# Refer https:/q/github.com/scipy/scipy/issues/10362
# Note below works for both cupy and scipy sparse matrices
ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
else:
msg = (
"\nYou did not provide metadata, so Dask is running the"
"function on a small dataset to guess output types. "
"It is possible that Dask will guess incorrectly.\n"
"To provide an explicit output types or to silence this message, "
"please provide the `predict_meta`, `predict_proba_meta`,"
"`transform_meta` as appropiate"
)
warnings.warn(msg)
ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
return model_fn(ar, estimator)
|
26,116 |
def test_get_txn_after_bls_key_rotation(looper, txnPoolNodeSet,
sdk_wallet_stewards,
sdk_wallet_trustee,
sdk_wallet_client,
sdk_pool_handle):
check_update_bls_key(node_num=0, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=1, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=2, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=3, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
# Stop receiving of commits in a circle, so no nodes will have the equal set of multi signatures
with delay_rules_without_processing(txnPoolNodeSet[0].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[3].name)):
with delay_rules_without_processing(txnPoolNodeSet[1].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[0].name)):
with delay_rules_without_processing(txnPoolNodeSet[2].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[1].name)):
with delay_rules_without_processing(txnPoolNodeSet[3].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[2].name)):
did_future = create_and_store_my_did(sdk_wallet_client[0], "{}")
did, verkey = looper.loop.run_until_complete(did_future)
nym_request_future = ledger.build_nym_request(sdk_wallet_trustee[1], did, verkey, None, None)
nym_request = looper.loop.run_until_complete(nym_request_future)
nym_response_future = ledger.sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], nym_request)
looper.loop.run_until_complete(nym_response_future)
get_txn_request_future = ledger.build_get_txn_request(sdk_wallet_client[1], "DOMAIN", 1)
get_txn_request = looper.loop.run_until_complete(get_txn_request_future)
get_txn_response_future = ledger.submit_request(sdk_pool_handle, get_txn_request)
looper.loop.run_until_complete(get_txn_response_future)
|
def test_get_txn_after_bls_key_rotation(looper, txnPoolNodeSet,
sdk_wallet_stewards,
sdk_wallet_trustee,
sdk_wallet_client,
sdk_pool_handle):
check_update_bls_key(node_num=0, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=1, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=2, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
check_update_bls_key(node_num=3, saved_multi_sigs_count=4,
looper=looper, txnPoolNodeSet=txnPoolNodeSet,
sdk_wallet_stewards=sdk_wallet_stewards,
sdk_wallet_client=sdk_wallet_client,
sdk_pool_handle=sdk_pool_handle,
pool_refresh=False)
# Stop receiving of commits in a circle, so no nodes will have non-equal set of participants in multi signatures
with delay_rules_without_processing(txnPoolNodeSet[0].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[3].name)):
with delay_rules_without_processing(txnPoolNodeSet[1].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[0].name)):
with delay_rules_without_processing(txnPoolNodeSet[2].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[1].name)):
with delay_rules_without_processing(txnPoolNodeSet[3].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[2].name)):
did_future = create_and_store_my_did(sdk_wallet_client[0], "{}")
did, verkey = looper.loop.run_until_complete(did_future)
nym_request_future = ledger.build_nym_request(sdk_wallet_trustee[1], did, verkey, None, None)
nym_request = looper.loop.run_until_complete(nym_request_future)
nym_response_future = ledger.sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], nym_request)
looper.loop.run_until_complete(nym_response_future)
get_txn_request_future = ledger.build_get_txn_request(sdk_wallet_client[1], "DOMAIN", 1)
get_txn_request = looper.loop.run_until_complete(get_txn_request_future)
get_txn_response_future = ledger.submit_request(sdk_pool_handle, get_txn_request)
looper.loop.run_until_complete(get_txn_response_future)
|
24,908 |
def run_symilar(argv: Optional[Sequence[str]] = None):
"""Run symilar
Arguments can be a list of strings normally supplied as arguments on the command line
"""
from pylint.checkers.similar import Run as SimilarRun
SimilarRun(argv or sys.argv[1:])
|
def run_symilar(argv: Optional[Sequence[str]] = None):
"""Run symilar
argv can be a list of strings normally supplied as arguments on the command line
"""
from pylint.checkers.similar import Run as SimilarRun
SimilarRun(argv or sys.argv[1:])
|
44,938 |
def test_flow_runner_uses_default_executor_on_flow_if_present():
t = SuccessTask()
with Flow(name="test", executor=Executor()) as flow:
result = t()
with raise_on_exception():
with pytest.raises(NotImplementedError):
FlowRunner(flow=flow).run(executor=Executor())
|
def test_flow_runner_uses_default_executor_on_flow_if_present():
t = SuccessTask()
with Flow(name="test", executor=Executor()) as flow:
result = t()
with raise_on_exception():
with pytest.raises(NotImplementedError):
FlowRunner(flow=flow).run(executor=None)
|
498 |
def set_request_duration_reporting_threshold(threshold):
"""Decorator to override the default reporting threshold for a view.
If requests to the view take longer than the threshold a Sentry event
will get created.
See `corehq.middleware.LogLongRequestMiddleware` for where the duration is compared
to the threshold.
"""
def decorator(view):
setattr(view, DURATION_REPORTING_THRESHOLD, threshold)
return view
return decorator
|
def set_request_duration_reporting_threshold(seconds):
"""Decorator to override the default reporting threshold for a view.
If requests to the view take longer than the threshold a Sentry event
will get created.
See `corehq.middleware.LogLongRequestMiddleware` for where the duration is compared
to the threshold.
"""
def decorator(view):
setattr(view, DURATION_REPORTING_THRESHOLD, threshold)
return view
return decorator
|
2,371 |
def is_in_interval_range(x, interval):
"""Test whether values of x are in interval range from Interval.
Parameters
----------
x : ndarray
Array whose elements are tested to be in interval range.
interval: Interval
An Interval range.
"""
if interval.low_inclusive:
low = np.greater_equal(x, interval.low)
else:
low = np.greater(x, interval.low)
if not np.all(low):
return False
if interval.high_inclusive:
high = np.less_equal(x, interval.high)
else:
high = np.less(x, interval.high)
# Note: np.all returns numpy.bool_
if np.all(high):
return True
else:
return False
|
def is_in_interval_range(x, interval):
"""Test whether values of x are all in interval range from Interval.
Parameters
----------
x : ndarray
Array whose elements are tested to be in interval range.
interval: Interval
An Interval range.
"""
if interval.low_inclusive:
low = np.greater_equal(x, interval.low)
else:
low = np.greater(x, interval.low)
if not np.all(low):
return False
if interval.high_inclusive:
high = np.less_equal(x, interval.high)
else:
high = np.less(x, interval.high)
# Note: np.all returns numpy.bool_
if np.all(high):
return True
else:
return False
|
24,169 |
def _assert_check(aggregator, rate_metric_count):
# prefix every metric with check name (kyototycoon.)
# no replications, so ignore kyototycoon.replication.delay
for mname in GAUGES:
if mname != 'replication.delay':
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS, count=2)
for mname in DB_GAUGES:
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS + ['db:0'], count=2)
for mname in ALL_RATES:
aggregator.assert_metric('kyototycoon.{}_per_s'.format(mname), tags=TAGS, count=rate_metric_count)
# service check
aggregator.assert_service_check(KyotoTycoonCheck.SERVICE_CHECK_NAME, status=KyotoTycoonCheck.OK, tags=TAGS, count=2)
aggregator.assert_all_metrics_covered()
|
def _assert_check(aggregator, rate_metric_count=1):
# prefix every metric with check name (kyototycoon.)
# no replications, so ignore kyototycoon.replication.delay
for mname in GAUGES:
if mname != 'replication.delay':
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS, count=2)
for mname in DB_GAUGES:
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS + ['db:0'], count=2)
for mname in ALL_RATES:
aggregator.assert_metric('kyototycoon.{}_per_s'.format(mname), tags=TAGS, count=rate_metric_count)
# service check
aggregator.assert_service_check(KyotoTycoonCheck.SERVICE_CHECK_NAME, status=KyotoTycoonCheck.OK, tags=TAGS, count=2)
aggregator.assert_all_metrics_covered()
|
41,466 |
def read_tasoc_lightcurve(filename,
flux_column="FLUX_RAW",
quality_bitmask="default"):
"""Returns a `TessLightCurve`.
Parameters
----------
filename : str
Local path or remote url of TASOC light curve FITS file.
flux_column : 'flux_RAW' - this contains the T'DA extracted lightcurve,
with no corrections applied to the raw light curves. Corrected lightcurves
may be a thing in the future as there is a flux_corr column.
quality_bitmask : For now this always none - as no calibration applied
"""
lc = read_generic_lightcurve(filename,
flux_column=flux_column.lower(),
time_format='btjd',
quality_column="QUALITY")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
#quality_mask = TessQualityFlags.create_quality_mask(
# quality_array=lc['dquality'],
# bitmask=quality_bitmask)
#lc = lc[quality_mask]
lc.meta['TARGETID'] = lc.meta.get('TICID')
lc.meta['QUALITY_BITMASK'] = quality_bitmask
#lc.meta['QUALITY_MASK'] = quality_mask
# QLP light curves are normalized by default
lc.meta['NORMALIZED'] = True
return TessLightCurve(data=lc)
|
def read_tasoc_lightcurve(filename, flux_column="FLUX_RAW"):
"""Returns a `TessLightCurve`.
Parameters
----------
filename : str
Local path or remote url of TASOC light curve FITS file.
flux_column : 'flux_RAW' - this contains the T'DA extracted lightcurve,
with no corrections applied to the raw light curves. Corrected lightcurves
may be a thing in the future as there is a flux_corr column.
quality_bitmask : For now this always none - as no calibration applied
"""
lc = read_generic_lightcurve(filename,
flux_column=flux_column.lower(),
time_format='btjd',
quality_column="QUALITY")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
#quality_mask = TessQualityFlags.create_quality_mask(
# quality_array=lc['dquality'],
# bitmask=quality_bitmask)
#lc = lc[quality_mask]
lc.meta['TARGETID'] = lc.meta.get('TICID')
lc.meta['QUALITY_BITMASK'] = quality_bitmask
#lc.meta['QUALITY_MASK'] = quality_mask
# QLP light curves are normalized by default
lc.meta['NORMALIZED'] = True
return TessLightCurve(data=lc)
|
28,312 |
def validate_dynamic_column_data(data: Mapping[str, Any]) -> None:
"""
Validate the given dicts tags and values. Note that None is not a valid
value, and keys should be valid SQLite column names
(i.e. contain only alphanumeric characters and underscores).
Args:
data: the metadata mapping (tags to values)
"""
for tag, val in data.items():
if not tag.isidentifier():
raise KeyError(
f"Tag {tag} is not a valid tag. "
"Use only alphanumeric characters and underscores!"
)
if val is None:
raise ValueError(
f"Tag {tag} has value None. " "That is not a valid metadata value!"
)
|
def validate_dynamic_column_data(data: Mapping[str, Any]) -> None:
"""
Validate the given dicts tags and values. Note that None is not a valid
value, and keys should be valid SQLite column names
(i.e. contain only alphanumeric characters and underscores).
Args:
data: the metadata mapping (tags to values)
"""
for tag, val in data.items():
if not tag.isidentifier():
raise KeyError(
f"Tag {tag} is not a valid tag. "
"Use only alphanumeric characters and underscores!"
)
if val is None:
raise ValueError(
f"Tag {tag} has value None. That is not a valid metadata value!"
)
|
31,056 |
def viper_download(client, args):
file_hash = args.get('file_hash')
if len(file_hash) == 64:
sample_info = client.sample_information(file_hash)
sample = sample_download(file_hash)
if sample.status_code == 200:
filename = sample_info['data']['name']
viper_id = sample_info['data']['id']
mime = sample_info['data']['mime']
file_type = sample_info['data']['type']
size = sample_info['data']['size']
table_object = [{"File Name": filename, "File Hash": file_hash,
"ViperID": viper_id, "MIME": mime, "File Type": file_type, "Size": size}]
context_object = {'Viper': {"Name": filename, "SHA256": file_hash,
"ViperID": viper_id, "MIME": mime, "Type": file_type, "Size": size}}
demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': table_object, "EntryContext": context_object})
demisto.results(fileResult(filename, sample.content))
else:
return_error('No valid sample found')
else:
return_error('Hash length is invalid.')
|
def viper_download(client, args):
file_hash = args.get('file_hash')
if len(file_hash) == 64:
sample_info = client.sample_information(file_hash)
sample = sample_download(file_hash)
if sample.status_code == 200:
filename = sample_info['data']['name']
viper_id = sample_info['data']['id']
mime = sample_info['data']['mime']
file_type = sample_info['data']['type']
size = sample_info['data']['size']
table_object = [{"File Name": filename, "File Hash": file_hash,
"ViperID": viper_id, "MIME": mime, "File Type": file_type, "Size": size}]
context_object = {'Viper': {"Name": filename, "SHA256": file_hash,
"ViperID": viper_id, "MIME": mime, "Type": file_type, "Size": size}}
demisto.results({'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': table_object, "EntryContext": context_object})
demisto.results(fileResult(filename, sample.content))
else:
raise DemistoException('No valid sample found')
else:
return_error('Hash length is invalid.')
|
20,595 |
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var in config.val.qt.environ:
val = config.val.qt.environ[var]
if val == 'None':
os.environ[var] = ''
else:
os.environ[var] = val
|
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var, val in config.val.qt.environ.items():
if val == 'None':
os.environ[var] = ''
else:
os.environ[var] = val
|
24,636 |
def find_ion_saturation_current(
voltage: np.ndarray,
current: np.ndarray,
*,
fit_type: str = "exp_plus_linear",
current_bound: float = None,
voltage_bound: float = None,
) -> Tuple[ffuncs.Linear, ISatExtras]:
"""
Determines the ion-saturation current (:math:`I_{sat}`) for a given
current-voltage (IV) curve obtained from a swept Langmuir probe.
The current collected by a Langmuir probe reaches ion-saturation
when the probe is sufficiently biased so the influx of electrons is
completely repelled leading to only the collection of ions. (For
additional details see the **Notes** section below.)
**Aliases:** `find_isat_`
Parameters
----------
voltage: `numpy.ndarray`
1-D numpy array of monotonically increasing probe biases
(should be in volts).
current: `numpy.ndarray`
1-D numpy array of probe current (should be in amperes)
corresponding to the ``voltage`` array.
fit_type: `str`
The type of curve (:term:`fit-function`) to be fitted to the
Langmuir trace, valid options are listed below.
(DEFAULT ``"exp_plus_linear"``)
+-----------------------+----------------------------------------------------------+
| ``"linear"`` | `~plasmapy.analysis.fit_functions.Linear` |
+-----------------------+----------------------------------------------------------+
| ``"exponential"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` |
+-----------------------+----------------------------------------------------------+
| ``"exp_plus_linear"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` |
+-----------------------+----------------------------------------------------------+
current_bound: `float`
A fraction representing a percentile window around the minimum
current for which to collect the points. For example, a value
of ``0.1`` indicates to use all points within 10% of the
minimum current. (DEFAULT ``None``)
|
If neither ``current_bound`` or ``voltage_bound`` are specified,
then the routine will collect indices based on an internal
``current_bound`` setting for the specified ``fit_type``.
+-----------------------+--------------------------------------+
| ``"linear"`` | 0.4 |
+-----------------------+--------------------------------------+
| ``"exponential"`` | 1.0 |
+-----------------------+--------------------------------------+
| ``"exp_plus_linear"`` | 1.0 |
+-----------------------+--------------------------------------+
voltage_bound: `float`
A bias voltage (in volts) that specifies an upper bound used to
collect the points for the curve fit. That is, points that
satisfy ``voltage <= voltage_bound`` are used in the fit.
(DEFAULT ``None``)
Returns
-------
isat: `~plasmapy.analysis.fit_functions.Linear`
A :term:`fit-function` representing the linear portion of the
fitter curve.
extras: `ISatExtras`
Additional information from the curve fit:
* ``extras.fitted_func`` is the :term:`fit-function` (specified
by ``fit_type``) fitted to the IV-curve
* ``extras.rsq`` is the coefficient of determination
(r-squared) value of the ``extras.fitted_func`` to the IV-curve
* ``extras.fitted_indices`` is a `slice` object representing the
points used in the curve fit (i.e.
``(voltage[extras.fitted_indices], current[extras.fitted_indices])``).
Notes
-----
This routine works by:
1. Selecting the points to be used in the fit as determined by
``voltage_bound`` or ``current_bound``.
2. Fitting the selected points with the :term:`fit-function`
specified by ``fit_type``.
3. Extracting the linear component of the fit and returning that as
the ion-saturation current.
This routine opts to return a function representing a linear
ion-saturation current, since, while ideal planar Langmuir probes
reach a steady-state ion-saturation current, real world Langmuir
probes "suffer" from expanding sheaths as the bias voltage
increases. This sheath expansion results the ion-saturation
current also increasing.
"""
rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict()
_settings = {
"linear": {
"func": ffuncs.Linear,
"current_bound": 0.4,
},
"exp_plus_linear": {
"func": ffuncs.ExponentialPlusLinear,
"current_bound": 1.0,
},
"exp_plus_offset": {
"func": ffuncs.ExponentialPlusOffset,
"current_bound": 1.0,
},
}
try:
default_current_bound = _settings[fit_type]["current_bound"]
fit_func = _settings[fit_type]["func"]()
rtn_extras["fitted_func"] = fit_func
except KeyError:
raise ValueError(
f"Requested fit '{fit_type}' is not a valid option. Valid options "
f"are {list(_settings.keys())}."
)
# check voltage and current arrays
voltage, current = check_sweep(voltage, current, strip_units=True)
# condition kwargs voltage_bound and current_bound
if voltage_bound is None and current_bound is None:
current_bound = default_current_bound
elif voltage_bound is not None and current_bound is not None:
raise ValueError(
"Both keywords 'current_bound' and `voltage_bound' are specified, "
"use only one."
)
if current_bound is not None:
if not isinstance(current_bound, numbers.Real):
raise TypeError(
f"Keyword 'current_bound' is of type {type(current_bound)}, "
f"expected an int or float."
)
current_min = current.min()
current_bound = (1.0 - current_bound) * current_min
mask = np.where(current <= current_bound)[0]
else: # voltage_bound is not None
if not isinstance(voltage_bound, numbers.Real):
raise TypeError(
f"Keyword 'voltage_bound' is of type {type(voltage_bound)}, "
f"expected an int or float."
)
mask = np.where(voltage <= voltage_bound)[0]
if mask.size == 0:
raise ValueError(
f"The specified bounding keywords, 'voltage_bound' "
f"({voltage_bound}) and 'current_bound' ({current_bound}), "
f"resulted in a fit window containing no points."
)
mask = slice(0, mask[-1] + 1)
rtn_extras["fitted_indices"] = mask
volt_sub = voltage[mask]
curr_sub = current[mask]
fit_func.curve_fit(volt_sub, curr_sub)
rtn_extras["rsq"] = fit_func.rsq
m = getattr(fit_func.params, "m", 0.0)
b = getattr(fit_func.params, "b", 0.0)
m_err = getattr(fit_func.param_errors, "m", 0.0)
b_err = getattr(fit_func.param_errors, "b", 0.0)
isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err))
return isat, ISatExtras(**rtn_extras)
|
def find_ion_saturation_current(
voltage: np.ndarray,
current: np.ndarray,
*,
fit_type: str = "exp_plus_linear",
current_bound: float = None,
voltage_bound: float = None,
) -> Tuple[ffuncs.Linear, ISatExtras]:
"""
Determines the ion-saturation current (:math:`I_{sat}`) for a given
current-voltage (IV) curve obtained from a swept Langmuir probe.
The current collected by a Langmuir probe reaches ion-saturation
when the probe is sufficiently biased so the influx of electrons is
completely repelled leading to only the collection of ions. (For
additional details see the **Notes** section below.)
**Aliases:** `find_isat_`
Parameters
----------
voltage: `numpy.ndarray`
1-D numpy array of monotonically increasing probe biases
(should be in volts).
current: `numpy.ndarray`
1-D numpy array of probe current (should be in amperes)
corresponding to the ``voltage`` array.
fit_type: `str`
The type of curve (:term:`fit-function`) to be fitted to the
Langmuir trace, valid options are listed below.
(DEFAULT ``"exp_plus_linear"``)
+-----------------------+----------------------------------------------------------+
| ``"linear"`` | `~plasmapy.analysis.fit_functions.Linear` |
+-----------------------+----------------------------------------------------------+
| ``"exponential"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` |
+-----------------------+----------------------------------------------------------+
| ``"exp_plus_linear"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` |
+-----------------------+----------------------------------------------------------+
current_bound: `float`
A fraction representing a percentile window around the minimum
current for which to collect the points. For example, a value
of ``0.1`` indicates to use all points within 10% of the
minimum current. (DEFAULT ``None``)
|
If neither ``current_bound`` or ``voltage_bound`` are specified,
then the routine will collect indices based on an internal
``current_bound`` setting for the specified ``fit_type``.
+-----------------------+--------------------------------------+
| ``"linear"`` | 0.4 |
+-----------------------+--------------------------------------+
| ``"exponential"`` | 1.0 |
+-----------------------+--------------------------------------+
| ``"exp_plus_linear"`` | 1.0 |
+-----------------------+--------------------------------------+
voltage_bound: `float`
A bias voltage (in volts) that specifies an upper bound used to
collect the points for the curve fit. That is, points that
satisfy ``voltage <= voltage_bound`` are used in the fit.
(DEFAULT ``None``)
Returns
-------
isat: `~plasmapy.analysis.fit_functions.Linear`
A :term:`fit-function` representing the linear portion of the
fitter curve.
extras: `ISatExtras`
Additional information from the curve fit:
* ``extras.fitted_func`` is the :term:`fit-function` (specified
by ``fit_type``) fitted to the IV-curve
* ``extras.rsq`` is the coefficient of determination
(r-squared) value of the ``extras.fitted_func`` to the IV-curve
* ``extras.fitted_indices`` is a `slice` object representing the
points used in the curve fit (i.e.
``(voltage[extras.fitted_indices], current[extras.fitted_indices])``).
Notes
-----
This routine works by:
1. Selecting the points to be used in the fit as determined by
``voltage_bound`` or ``current_bound``.
2. Fitting the selected points with the :term:`fit-function`
specified by ``fit_type``.
3. Extracting the linear component of the fit and returning that as
the ion-saturation current.
This routine opts to return a function representing a linear
ion-saturation current, since, while ideal planar Langmuir probes
reach a steady-state ion-saturation current, real world Langmuir
probes "suffer" from expanding sheaths as the bias voltage
increases. This sheath expansion results the ion-saturation
current also increasing.
"""
rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict()
_settings = {
"linear": {
"func": ffuncs.Linear,
"current_bound": 0.4,
},
"exp_plus_linear": {
"func": ffuncs.ExponentialPlusLinear,
"current_bound": 1.0,
},
"exp_plus_offset": {
"func": ffuncs.ExponentialPlusOffset,
"current_bound": 1.0,
},
}
try:
default_current_bound = _settings[fit_type]["current_bound"]
fit_func = _settings[fit_type]["func"]()
rtn_extras["fitted_func"] = fit_func
except KeyError:
raise ValueError(
f"Requested fit '{fit_type}' is not a valid option. Valid options "
f"are {list(_settings.keys())}."
)
# check voltage and current arrays
voltage, current = check_sweep(voltage, current, strip_units=True)
# condition kwargs voltage_bound and current_bound
if voltage_bound is None and current_bound is None:
current_bound = default_current_bound
elif voltage_bound is not None and current_bound is not None:
raise ValueError(
"Both keywords 'current_bound' and 'voltage_bound' are specified, "
"use only one."
)
if current_bound is not None:
if not isinstance(current_bound, numbers.Real):
raise TypeError(
f"Keyword 'current_bound' is of type {type(current_bound)}, "
f"expected an int or float."
)
current_min = current.min()
current_bound = (1.0 - current_bound) * current_min
mask = np.where(current <= current_bound)[0]
else: # voltage_bound is not None
if not isinstance(voltage_bound, numbers.Real):
raise TypeError(
f"Keyword 'voltage_bound' is of type {type(voltage_bound)}, "
f"expected an int or float."
)
mask = np.where(voltage <= voltage_bound)[0]
if mask.size == 0:
raise ValueError(
f"The specified bounding keywords, 'voltage_bound' "
f"({voltage_bound}) and 'current_bound' ({current_bound}), "
f"resulted in a fit window containing no points."
)
mask = slice(0, mask[-1] + 1)
rtn_extras["fitted_indices"] = mask
volt_sub = voltage[mask]
curr_sub = current[mask]
fit_func.curve_fit(volt_sub, curr_sub)
rtn_extras["rsq"] = fit_func.rsq
m = getattr(fit_func.params, "m", 0.0)
b = getattr(fit_func.params, "b", 0.0)
m_err = getattr(fit_func.param_errors, "m", 0.0)
b_err = getattr(fit_func.param_errors, "b", 0.0)
isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err))
return isat, ISatExtras(**rtn_extras)
|
22,436 |
def data_input_to_staging_path_and_source_path(v, invalid_chars=('/',)):
staging_path = v.staging_path(invalid_chars=invalid_chars)
return {
'staging_path': staging_path,
'source_path': data_input_to_path(v),
'metadata_files': [{'staging_path': f"{staging_path}.{mf[0]}", 'source_path': mf[1]} for mf in v.all_metadata_files]
}
|
def data_input_to_staging_path_and_source_path(v, invalid_chars=('/',)):
staging_path = v.get_staging_path(invalid_chars=invalid_chars)
return {
'staging_path': staging_path,
'source_path': data_input_to_path(v),
'metadata_files': [{'staging_path': f"{staging_path}.{mf[0]}", 'source_path': mf[1]} for mf in v.all_metadata_files]
}
|
32,112 |
def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict:
"""
create_intel command: Creates Intel in CTIX
"""
data = {
"ips": args.get("ips", []),
"urls": args.get("urls", []),
"domains": args.get("domains", []),
"files": args.get("files", []),
"emails": args.get("emails", []),
"malwares": args.get("malwares", []),
"threat_actors": args.get("threat_actors", []),
"attack_patterns": args.get("attack_patterns", []),
"title": args.get("title", None),
"description": args.get("description", None),
"confidence": args.get("confidence", None),
"tlp": args.get("tlp", None),
}
create_intel_response = client.create_intel(data)
return create_intel_response
|
def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict:
"""
create_intel command: Creates Intel in CTIX
"""
data = {
"ips": args.get("ips", []),
"urls": args.get("urls", []),
"domains": args.get("domains", []),
"files": args.get("files", []),
"emails": args.get("emails", []),
"malwares": args.get("malwares", []),
"threat_actors": args.get("threat_actors", []),
"attack_patterns": args.get("attack_patterns", []),
"title": args.get("title", None),
"description": args.get("description", None),
"confidence": args.get("confidence", None),
"tlp": args.get("tlp"),
}
create_intel_response = client.create_intel(data)
return create_intel_response
|
49,287 |
def register(connection):
create_deterministic_function = functools.partial(
connection.create_function,
deterministic=True,
)
create_deterministic_function("django_date_extract", 2, _sqlite_datetime_extract)
create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc)
create_deterministic_function(
"django_datetime_cast_date", 3, _sqlite_datetime_cast_date
)
create_deterministic_function(
"django_datetime_cast_time", 3, _sqlite_datetime_cast_time
)
create_deterministic_function(
"django_datetime_extract", 4, _sqlite_datetime_extract
)
create_deterministic_function("django_datetime_trunc", 4, _sqlite_datetime_trunc)
create_deterministic_function("django_time_extract", 2, _sqlite_time_extract)
create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc)
create_deterministic_function("django_time_diff", 2, _sqlite_time_diff)
create_deterministic_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
create_deterministic_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
create_deterministic_function("regexp", 2, _sqlite_regexp)
create_deterministic_function("BITXOR", 2, _sqlite_bitxor)
create_deterministic_function("COT", 1, _sqlite_cot)
create_deterministic_function("LPAD", 3, _sqlite_lpad)
create_deterministic_function("MD5", 1, _sqlite_md5)
create_deterministic_function("REPEAT", 2, _sqlite_repeat)
create_deterministic_function("REVERSE", 1, _sqlite_reverse)
create_deterministic_function("RPAD", 3, _sqlite_rpad)
create_deterministic_function("SHA1", 1, _sqlite_sha1)
create_deterministic_function("SHA224", 1, _sqlite_sha224)
create_deterministic_function("SHA256", 1, _sqlite_sha256)
create_deterministic_function("SHA384", 1, _sqlite_sha384)
create_deterministic_function("SHA512", 1, _sqlite_sha512)
create_deterministic_function("SIGN", 1, _sqlite_sign)
# Don't use the built-in RANDOM() function because it returns a value
# in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1).
connection.create_function("RAND", 0, random.random)
connection.create_aggregate("STDDEV_POP", 1, StdDevPop)
connection.create_aggregate("STDDEV_SAMP", 1, StdDevSamp)
connection.create_aggregate("VAR_POP", 1, VarPop)
connection.create_aggregate("VAR_SAMP", 1, VarSamp)
# Some math functions are enabled by default in SQLite 3.35+.
sql = "SELECT 1 FROM pragma_compile_options() WHERE compile_options = ? LIMIT 1"
if connection.execute(sql, ["ENABLE_MATH_FUNCTIONS"]).fetchone() is None:
create_deterministic_function("ACOS", 1, _sqlite_acos)
create_deterministic_function("ASIN", 1, _sqlite_asin)
create_deterministic_function("ATAN", 1, _sqlite_atan)
create_deterministic_function("ATAN2", 2, _sqlite_atan2)
create_deterministic_function("CEILING", 1, _sqlite_ceiling)
create_deterministic_function("COS", 1, _sqlite_cos)
create_deterministic_function("DEGREES", 1, _sqlite_degrees)
create_deterministic_function("EXP", 1, _sqlite_exp)
create_deterministic_function("FLOOR", 1, _sqlite_floor)
create_deterministic_function("LN", 1, _sqlite_ln)
create_deterministic_function("LOG", 2, _sqlite_log)
create_deterministic_function("MOD", 2, _sqlite_mod)
create_deterministic_function("PI", 0, _sqlite_pi)
create_deterministic_function("POWER", 2, _sqlite_power)
create_deterministic_function("RADIANS", 1, _sqlite_radians)
create_deterministic_function("SIN", 1, _sqlite_sin)
create_deterministic_function("SQRT", 1, _sqlite_sqrt)
create_deterministic_function("TAN", 1, _sqlite_tan)
|
def register(connection):
create_deterministic_function = functools.partial(
connection.create_function,
deterministic=True,
)
create_deterministic_function("django_date_extract", 2, _sqlite_datetime_extract)
create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc)
create_deterministic_function(
"django_datetime_cast_date", 3, _sqlite_datetime_cast_date
)
create_deterministic_function(
"django_datetime_cast_time", 3, _sqlite_datetime_cast_time
)
create_deterministic_function(
"django_datetime_extract", 4, _sqlite_datetime_extract
)
create_deterministic_function("django_datetime_trunc", 4, _sqlite_datetime_trunc)
create_deterministic_function("django_time_extract", 2, _sqlite_time_extract)
create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc)
create_deterministic_function("django_time_diff", 2, _sqlite_time_diff)
create_deterministic_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
create_deterministic_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
create_deterministic_function("regexp", 2, _sqlite_regexp)
create_deterministic_function("BITXOR", 2, _sqlite_bitxor)
create_deterministic_function("COT", 1, _sqlite_cot)
create_deterministic_function("LPAD", 3, _sqlite_lpad)
create_deterministic_function("MD5", 1, _sqlite_md5)
create_deterministic_function("REPEAT", 2, _sqlite_repeat)
create_deterministic_function("REVERSE", 1, _sqlite_reverse)
create_deterministic_function("RPAD", 3, _sqlite_rpad)
create_deterministic_function("SHA1", 1, _sqlite_sha1)
create_deterministic_function("SHA224", 1, _sqlite_sha224)
create_deterministic_function("SHA256", 1, _sqlite_sha256)
create_deterministic_function("SHA384", 1, _sqlite_sha384)
create_deterministic_function("SHA512", 1, _sqlite_sha512)
create_deterministic_function("SIGN", 1, _sqlite_sign)
# Don't use the built-in RANDOM() function because it returns a value
# in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1).
connection.create_function("RAND", 0, random.random)
connection.create_aggregate("STDDEV_POP", 1, StdDevPop)
connection.create_aggregate("STDDEV_SAMP", 1, StdDevSamp)
connection.create_aggregate("VAR_POP", 1, VarPop)
connection.create_aggregate("VAR_SAMP", 1, VarSamp)
# Some math functions are enabled by default in SQLite 3.35+.
sql = "SELECT 1 FROM pragma_compile_options() WHERE compile_options = 'ENABLE_MATH_FUNCTIONS' LIMIT 1"
if connection.execute(sql).fetchone() is None:
create_deterministic_function("ACOS", 1, _sqlite_acos)
create_deterministic_function("ASIN", 1, _sqlite_asin)
create_deterministic_function("ATAN", 1, _sqlite_atan)
create_deterministic_function("ATAN2", 2, _sqlite_atan2)
create_deterministic_function("CEILING", 1, _sqlite_ceiling)
create_deterministic_function("COS", 1, _sqlite_cos)
create_deterministic_function("DEGREES", 1, _sqlite_degrees)
create_deterministic_function("EXP", 1, _sqlite_exp)
create_deterministic_function("FLOOR", 1, _sqlite_floor)
create_deterministic_function("LN", 1, _sqlite_ln)
create_deterministic_function("LOG", 2, _sqlite_log)
create_deterministic_function("MOD", 2, _sqlite_mod)
create_deterministic_function("PI", 0, _sqlite_pi)
create_deterministic_function("POWER", 2, _sqlite_power)
create_deterministic_function("RADIANS", 1, _sqlite_radians)
create_deterministic_function("SIN", 1, _sqlite_sin)
create_deterministic_function("SQRT", 1, _sqlite_sqrt)
create_deterministic_function("TAN", 1, _sqlite_tan)
|
29,311 |
def apply_change_list(story_id, change_list):
"""Applies a changelist to a story and returns the result.
Args:
story_id: str. ID of the given story.
change_list: list(StoryChange). A change list to be applied to the given
story.
Returns:
Story, list(str), list(str). The resulting story domain object, the
exploration IDs removed from story and the exploration IDs added to
the story.
Raises:
Exception. The elements in change_list is not of domain object type.
"""
story = story_fetchers.get_story_by_id(story_id)
exp_ids_in_old_story = story.story_contents.get_all_linked_exp_ids()
try:
for change in change_list:
if not isinstance(change, story_domain.StoryChange):
raise Exception('Expected change to be of type StoryChange')
if change.cmd == story_domain.CMD_ADD_STORY_NODE:
story.add_node(change.node_id, change.title)
elif change.cmd == story_domain.CMD_DELETE_STORY_NODE:
story.delete_node(change.node_id)
elif (change.cmd ==
story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS):
if change.new_value:
story.mark_node_outline_as_finalized(change.node_id)
else:
story.mark_node_outline_as_unfinalized(change.node_id)
elif change.cmd == story_domain.CMD_UPDATE_STORY_NODE_PROPERTY:
if (change.property_name ==
story_domain.STORY_NODE_PROPERTY_OUTLINE):
story.update_node_outline(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_TITLE):
story.update_node_title(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESCRIPTION):
story.update_node_description(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME):
story.update_node_thumbnail_filename(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_node_thumbnail_bg_color(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS):
story.update_node_acquired_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS):
story.update_node_prerequisite_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS):
story.update_node_destination_node_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID):
story.update_node_exploration_id(
change.node_id, change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_PROPERTY:
if (change.property_name ==
story_domain.STORY_PROPERTY_TITLE):
story.update_title(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME):
story.update_thumbnail_filename(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_thumbnail_bg_color(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_DESCRIPTION):
story.update_description(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_NOTES):
story.update_notes(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_LANGUAGE_CODE):
story.update_language_code(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_URL_FRAGMENT):
story.update_url_fragment(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_META_TAG_CONTENT):
story.update_meta_tag_content(change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY:
if (change.property_name ==
story_domain.INITIAL_NODE_ID):
story.update_initial_node(change.new_value)
if change.property_name == story_domain.NODE:
story.rearrange_node_in_story(
change.old_value, change.new_value)
elif (
change.cmd ==
story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
# Loading the story model from the datastore into a
# Story domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# story is sufficient to apply the schema migration.
continue
exp_ids_in_modified_story = (
story.story_contents.get_all_linked_exp_ids())
exp_ids_removed_from_story = list(
set(exp_ids_in_old_story).difference(exp_ids_in_modified_story))
exp_ids_added_to_story = list(
set(exp_ids_in_modified_story).difference(exp_ids_in_old_story))
return story, exp_ids_removed_from_story, exp_ids_added_to_story
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, story_id, change_list)
)
raise e
|
def apply_change_list(story_id, change_list):
"""Applies a changelist to a story and returns the result.
Args:
story_id: str. ID of the given story.
change_list: list(StoryChange). A change list to be applied to the given
story.
Returns:
Story, list(str), list(str). The resulting story domain object, the
exploration IDs removed from story and the exploration IDs added to
the story.
Raises:
Exception. The elements in change_list are not of a domain object type.
"""
story = story_fetchers.get_story_by_id(story_id)
exp_ids_in_old_story = story.story_contents.get_all_linked_exp_ids()
try:
for change in change_list:
if not isinstance(change, story_domain.StoryChange):
raise Exception('Expected change to be of type StoryChange')
if change.cmd == story_domain.CMD_ADD_STORY_NODE:
story.add_node(change.node_id, change.title)
elif change.cmd == story_domain.CMD_DELETE_STORY_NODE:
story.delete_node(change.node_id)
elif (change.cmd ==
story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS):
if change.new_value:
story.mark_node_outline_as_finalized(change.node_id)
else:
story.mark_node_outline_as_unfinalized(change.node_id)
elif change.cmd == story_domain.CMD_UPDATE_STORY_NODE_PROPERTY:
if (change.property_name ==
story_domain.STORY_NODE_PROPERTY_OUTLINE):
story.update_node_outline(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_TITLE):
story.update_node_title(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESCRIPTION):
story.update_node_description(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME):
story.update_node_thumbnail_filename(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_node_thumbnail_bg_color(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS):
story.update_node_acquired_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS):
story.update_node_prerequisite_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS):
story.update_node_destination_node_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID):
story.update_node_exploration_id(
change.node_id, change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_PROPERTY:
if (change.property_name ==
story_domain.STORY_PROPERTY_TITLE):
story.update_title(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME):
story.update_thumbnail_filename(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_thumbnail_bg_color(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_DESCRIPTION):
story.update_description(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_NOTES):
story.update_notes(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_LANGUAGE_CODE):
story.update_language_code(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_URL_FRAGMENT):
story.update_url_fragment(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_META_TAG_CONTENT):
story.update_meta_tag_content(change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY:
if (change.property_name ==
story_domain.INITIAL_NODE_ID):
story.update_initial_node(change.new_value)
if change.property_name == story_domain.NODE:
story.rearrange_node_in_story(
change.old_value, change.new_value)
elif (
change.cmd ==
story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
# Loading the story model from the datastore into a
# Story domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# story is sufficient to apply the schema migration.
continue
exp_ids_in_modified_story = (
story.story_contents.get_all_linked_exp_ids())
exp_ids_removed_from_story = list(
set(exp_ids_in_old_story).difference(exp_ids_in_modified_story))
exp_ids_added_to_story = list(
set(exp_ids_in_modified_story).difference(exp_ids_in_old_story))
return story, exp_ids_removed_from_story, exp_ids_added_to_story
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, story_id, change_list)
)
raise e
|
29,059 |
def add_fingerprinters(
agent_configuration: AgentConfiguration, fingerprinters: Sequence[PluginConfiguration] = []
) -> AgentConfiguration:
network_scan_configuration = replace(
agent_configuration.propagation.network_scan, fingerprinters=fingerprinters
)
return replace_network_scan_configuration(agent_configuration, network_scan_configuration)
|
def add_fingerprinters(
agent_configuration: AgentConfiguration, fingerprinters: Sequence[PluginConfiguration]
) -> AgentConfiguration:
network_scan_configuration = replace(
agent_configuration.propagation.network_scan, fingerprinters=fingerprinters
)
return replace_network_scan_configuration(agent_configuration, network_scan_configuration)
|
2,658 |
def test_gaussian_mixture_single_component_stable():
"""
Non-regression test for #23032 ensuring 1-component GM works on only a
few samples.
"""
rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
gm = GaussianMixture(n_components=1)
gm.fit(X)
print(gm.sample()) # This will throw a ValueError
|
def test_gaussian_mixture_single_component_stable():
"""
Non-regression test for #23032 ensuring 1-component GM works on only a
few samples.
"""
rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
gm = GaussianMixture(n_components=1)
gm.fit(X).sample()
|
55,032 |
def test_analytic_deprecation():
"""Tests if the kwarg `analytic` is used and displays error message.
"""
msg = "The attribute `analytic` has been replaced by `shots=None`. "
msg += "Please use `shots=None` instead of `analytic=True`."
with pytest.raises(
DeviceError,
match=msg,
):
qml.device("default.gaussian", wires=1, shots=1, analytic=True)
|
def test_analytic_deprecation():
"""Tests if the kwarg `analytic` is used and displays error message.
"""
msg = "The analytic argument has been replaced by shots=None. "
msg += "Please use shots=None instead of analytic=True."
with pytest.raises(
DeviceError,
match=msg,
):
qml.device("default.gaussian", wires=1, shots=1, analytic=True)
|
12,267 |
def _tokenize_line(command):
'''
Tokenize a single line of QASM code.
Parameters
----------
command : str
one line of QASM code to be broken into "tokens".
Returns
-------
tokens : list of str
the tokens corresponding to the qasm line taken as input.
'''
# for gates without arguments
if "(" not in command:
tokens = list(chain(*[a.split() for a in command.split(",")]))
tokens = [token.strip() for token in tokens]
# for classically controlled gates
elif re.match(r"\s*if\s*\(", command):
groups = re.match(r"\s*if\s*\((.*)\)\s*(.*)\s+\((.*)\)(.*)", command)
# for classically controlled gates with arguments
if groups:
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line("{} ({}) {}".format(groups.group(2),
groups.group(3),
groups.group(4)))
tokens += tokens_gate
# for classically controlled gates without arguments
else:
groups = re.match(r"\s*if\s*\((.*)\)(.*)", command)
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line(groups.group(2))
tokens += tokens_gate
tokens = [token.strip() for token in tokens]
# for gates with arguments
else:
groups = re.match(r"(^.*?)\((.*)\)(.*)", command)
if not groups:
raise SyntaxError("QASM: Incorrect bracket formatting")
tokens = groups.group(1).split()
tokens.append("(")
tokens += groups.group(2).split(",")
tokens.append(")")
tokens += groups.group(3).split(",")
tokens = [token.strip() for token in tokens]
return tokens
|
def _tokenize_line(command):
'''
Tokenize a single line of QASM code.
Parameters
----------
command : str
One line of QASM code to be broken into "tokens".
Returns
-------
tokens : list of str
the tokens corresponding to the qasm line taken as input.
'''
# for gates without arguments
if "(" not in command:
tokens = list(chain(*[a.split() for a in command.split(",")]))
tokens = [token.strip() for token in tokens]
# for classically controlled gates
elif re.match(r"\s*if\s*\(", command):
groups = re.match(r"\s*if\s*\((.*)\)\s*(.*)\s+\((.*)\)(.*)", command)
# for classically controlled gates with arguments
if groups:
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line("{} ({}) {}".format(groups.group(2),
groups.group(3),
groups.group(4)))
tokens += tokens_gate
# for classically controlled gates without arguments
else:
groups = re.match(r"\s*if\s*\((.*)\)(.*)", command)
tokens = ["if", "(", groups.group(1), ")"]
tokens_gate = _tokenize_line(groups.group(2))
tokens += tokens_gate
tokens = [token.strip() for token in tokens]
# for gates with arguments
else:
groups = re.match(r"(^.*?)\((.*)\)(.*)", command)
if not groups:
raise SyntaxError("QASM: Incorrect bracket formatting")
tokens = groups.group(1).split()
tokens.append("(")
tokens += groups.group(2).split(",")
tokens.append(")")
tokens += groups.group(3).split(",")
tokens = [token.strip() for token in tokens]
return tokens
|
29,508 |
def validate_topic(topic: str) -> str:
assert topic is not None
topic = topic.strip()
if topic == "":
raise JsonableError(_("Topic can't be empty"))
for character in topic:
unicodeCategory = unicodedata.category(character)
if unicodeCategory == "Cc" or unicodeCategory == "Cs" or unicodeCategory == "Cn":
raise JsonableError(_("Invalid characters in topic!"))
return topic
|
def validate_topic(topic: str) -> str:
assert topic is not None
topic = topic.strip()
if topic == "":
raise JsonableError(_("Topic can't be empty"))
for character in topic:
unicodeCategory = unicodedata.category(character)
if unicodeCategory in ["Cc", "Cs", "Cn"]:
raise JsonableError(_("Invalid characters in topic!"))
return topic
|
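For reference, the Unicode general categories rejected above can be inspected directly with the standard library; a short sketch:
import unicodedata

# "Cc" = control, "Cs" = surrogate, "Cn" = unassigned; these are the
# categories validate_topic rejects.
print(unicodedata.category("a"))     # 'Ll' (accepted)
print(unicodedata.category("\x07"))  # 'Cc' (rejected: BEL control character)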
34,381 |
def compare_nlu(
configs: List[Text],
data: TrainingData,
exclusion_percentages: List[int],
f_score_results: Dict[Text, Any],
model_names: List[Text],
output: Text,
runs: int,
) -> List[int]:
"""
Trains and compares multiple NLU models.
For each run and exclusion percentage a model per config file is trained.
Thereby, the model is trained only on the current percentage of training data.
Afterwards, the model is tested on the complete test data of that run.
All results are stored in the provided output directory.
Args:
configs: config files needed for training
data: training data
exclusion_percentages: percentages of training data to exclude during comparison
f_score_results: dictionary of model name to f-score results per run
model_names: names of the models to train
output: the output directory
runs: number of comparison runs
Returns: training examples per run
"""
from rasa.train import train_nlu
training_examples_per_run = []
for run in range(runs):
logger.info("Beginning comparison run {}/{}".format(run + 1, runs))
run_path = os.path.join(output, "run_{}".format(run + 1))
io_utils.create_path(run_path)
test_path = os.path.join(run_path, TEST_DATA_FILE)
io_utils.create_path(test_path)
train, test = data.train_test_split()
write_to_file(test_path, test.nlu_as_markdown())
training_examples_per_run = []
for percentage in exclusion_percentages:
percent_string = f"{percentage}%_exclusion"
_, train = train.train_test_split(percentage / 100)
training_examples_per_run.append(len(train.training_examples))
model_output_path = os.path.join(run_path, percent_string)
train_split_path = os.path.join(model_output_path, "train")
train_nlu_split_path = os.path.join(
model_output_path, "train", TRAIN_DATA_FILE
)
train_nlg_split_path = os.path.join(
model_output_path, "train", NLG_DATA_FILE
)
io_utils.create_path(train_nlu_split_path)
write_to_file(train_nlu_split_path, train.nlu_as_markdown())
write_to_file(train_nlg_split_path, train.nlg_as_markdown())
for nlu_config, model_name in zip(configs, model_names):
logger.info(
"Evaluating configuration '{}' with {} training data.".format(
model_name, percent_string
)
)
try:
model_path = train_nlu(
nlu_config,
train_split_path,
model_output_path,
fixed_model_name=model_name,
)
except Exception as e:
logger.warning(f"Training model '{model_name}' failed. Error: {e}")
f_score_results[model_name][run].append(0.0)
continue
model_path = os.path.join(get_model(model_path), "nlu")
output_path = os.path.join(model_output_path, f"{model_name}_report")
result = run_evaluation(
test_path, model_path, output_directory=output_path, errors=True
)
f1 = result["intent_evaluation"]["f1_score"]
f_score_results[model_name][run].append(f1)
return training_examples_per_run
|
def compare_nlu(
configs: List[Text],
data: TrainingData,
exclusion_percentages: List[int],
f_score_results: Dict[Text, Any],
model_names: List[Text],
output: Text,
runs: int,
) -> List[int]:
"""
Trains and compares multiple NLU models.
For each run and exclusion percentage a model per config file is trained.
Thereby, the model is trained only on the current percentage of training data.
Afterwards, the model is tested on the complete test data of that run.
All results are stored in the provided output directory.
Args:
configs: config files needed for training
data: training data
exclusion_percentages: percentages of training data to exclude during comparison
f_score_results: dictionary of model name to f-score results per run
model_names: names of the models to train
output: the output directory
runs: number of comparison runs
Returns: training examples per run
"""
from rasa.train import train_nlu
training_examples_per_run = []
for run in range(runs):
logger.info("Beginning comparison run {}/{}".format(run + 1, runs))
run_path = os.path.join(output, "run_{}".format(run + 1))
io_utils.create_path(run_path)
test_path = os.path.join(run_path, TEST_DATA_FILE)
io_utils.create_path(test_path)
train, test = data.train_test_split()
write_to_file(test_path, test.nlu_as_markdown())
training_examples_per_run = []
for percentage in exclusion_percentages:
percent_string = f"{percentage}%_exclusion"
_, train = train.train_test_split(percentage / 100)
training_examples_per_run.append(len(train.training_examples))
model_output_path = os.path.join(run_path, percent_string)
train_split_path = os.path.join(model_output_path, "train")
train_nlu_split_path = os.path.join(
model_output_path, "train", TRAIN_DATA_FILE
)
train_nlg_split_path = os.path.join(
train_split_path, NLG_DATA_FILE
)
io_utils.create_path(train_nlu_split_path)
write_to_file(train_nlu_split_path, train.nlu_as_markdown())
write_to_file(train_nlg_split_path, train.nlg_as_markdown())
for nlu_config, model_name in zip(configs, model_names):
logger.info(
"Evaluating configuration '{}' with {} training data.".format(
model_name, percent_string
)
)
try:
model_path = train_nlu(
nlu_config,
train_split_path,
model_output_path,
fixed_model_name=model_name,
)
except Exception as e:
logger.warning(f"Training model '{model_name}' failed. Error: {e}")
f_score_results[model_name][run].append(0.0)
continue
model_path = os.path.join(get_model(model_path), "nlu")
output_path = os.path.join(model_output_path, f"{model_name}_report")
result = run_evaluation(
test_path, model_path, output_directory=output_path, errors=True
)
f1 = result["intent_evaluation"]["f1_score"]
f_score_results[model_name][run].append(f1)
return training_examples_per_run
|
27,440 |
def IERS_A_in_cache():
"""
Check if the IERS Bulletin A table is locally cached.
"""
urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
for url_key in urls:
# The below code which accesses ``urlmapfn`` is stolen from
# astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except (IOError, OSError) as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warnings.warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
else:
with _open_shelve(urlmapfn, True) as url2hash:
# TODO: try to figure out how to test this in the unicode case
if str(url_key) in url2hash:
return True
return False
|
def IERS_A_in_cache():
"""
Check if the IERS Bulletin A table is locally cached.
"""
urls = (iers.IERS_A_URL, iers.IERS_A_URL_MIRROR)
for url_key in urls:
# The below code which accesses ``urlmapfn`` is stolen from
# astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except (IOError, OSError) as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warnings.warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
else:
with _open_shelve(urlmapfn, True) as url2hash:
# TODO: try to figure out how to test this in the unicode case
if str(url_key) in url2hash:
return True
return False
|
30,962 |
def main():
value = demisto.args()['left']
if type(value) is list:
value = demisto.args()['left'][0]
relative_date = demisto.args()['right']
return_results(check_date(value, relative_date))
|
def main():
value = demisto.args()['left']
if type(value) is list:
value = demisto.args().get('left')[0]
relative_date = demisto.args()['right']
return_results(check_date(value, relative_date))
|
54,178 |
def samstat(bam, nth=1, out_dir=''):
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
samstat_qc = '{}.samstats.qc'.format(prefix)
cmd = 'samtools sort -n {bam} -T {prefix}.tmp -O sam | '
cmd += 'SAMstats --sorted_sam_file - --outf {samstat_qc}'
cmd = cmd.format(
bam=bam,
prefix=prefix,
samstat_qc=samstat_qc)
run_shell_cmd(cmd)
return samstat_qc
|
def samstat(bam, nth=1, out_dir=''):
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
samstat_qc = '{}.samstats.qc'.format(prefix)
cmds = [
'samtools sort -n {bam} -T {prefix}.tmp -O sam',
'SAMstats --sorted_sam_file - --outf {samstat_qc}',
]
cmd = ' | '.join(cmds)
cmd = cmd.format(
bam=bam,
prefix=prefix,
samstat_qc=samstat_qc)
run_shell_cmd(cmd)
return samstat_qc
|
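A quick check that joining the command list reproduces the intended shell pipeline; the file names below are placeholders for illustration, not paths used by the pipeline.
# Placeholder names, only to show the string that gets passed to run_shell_cmd.
cmds = [
    'samtools sort -n {bam} -T {prefix}.tmp -O sam',
    'SAMstats --sorted_sam_file - --outf {samstat_qc}',
]
print(' | '.join(cmds).format(bam='in.bam', prefix='out/in', samstat_qc='out/in.samstats.qc'))
# samtools sort -n in.bam -T out/in.tmp -O sam | SAMstats --sorted_sam_file - --outf out/in.samstats.qc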
34,211 |
def test_train_core_compare(run_in_default_project):
temp_dir = os.getcwd()
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "KerasPolicy"}],
},
"config_1.yml",
)
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_2.yml",
)
run_in_default_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.md",
"--out",
"core_comparison_results",
"--runs",
"2",
"--percentages",
"25",
"75",
"--augmentation",
"5",
)
assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
sub_dirs = list_subdirectories(os.path.join(temp_dir, "core_comparison_results"))
assert len(sub_dirs) == 2
files = list_files(os.path.join(temp_dir, "core_comparison_results", sub_dirs[0]))
assert len(files) == 4
assert files[0].endswith("tar.gz")
|
def test_train_core_compare(run_in_default_project):
temp_dir = os.getcwd()
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "KerasPolicy"}],
},
"config_1.yml",
)
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_2.yml",
)
run_in_default_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.md",
"--out",
"core_comparison_results",
"--runs",
"2",
"--percentages",
"25",
"75",
"--augmentation",
"5",
)
assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
run_directories = list_subdirectories(os.path.join(temp_dir, "core_comparison_results"))
    assert len(run_directories) == 2
    files = list_files(os.path.join(temp_dir, "core_comparison_results", run_directories[0]))
assert len(files) == 4
assert files[0].endswith("tar.gz")
|
10,910 |
def build_and_install_software(ecs, init_session_state, exit_on_failure=True):
"""
Build and install software for all provided parsed easyconfig files.
:param ecs: easyconfig files to install software with
:param init_session_state: initial session state, to use in test reports
:param exit_on_failure: whether or not to exit on installation failure
"""
# obtain a copy of the starting environment so each build can start afresh
# we shouldn't use the environment from init_session_state, since relevant env vars might have been set since
# e.g. via easyconfig.handle_allowed_system_deps
init_env = copy.deepcopy(os.environ)
start_progress_bar(PROGRESS_BAR_OVERALL, size=len(ecs))
res = []
for ec in ecs:
ec_res = {}
try:
(ec_res['success'], app_log, err) = build_and_install_one(ec, init_env)
ec_res['log_file'] = app_log
if not ec_res['success']:
ec_res['err'] = EasyBuildError(err)
except Exception as err:
# purposely catch all exceptions
ec_res['success'] = False
ec_res['err'] = err
ec_res['traceback'] = traceback.format_exc()
# keep track of success/total count
if ec_res['success']:
test_msg = "Successfully built %s" % ec['spec']
else:
test_msg = "Build of %s failed" % ec['spec']
if 'err' in ec_res:
test_msg += " (err: %s)" % ec_res['err']
# dump test report next to log file
test_report_txt = create_test_report(test_msg, [(ec, ec_res)], init_session_state)
if 'log_file' in ec_res and ec_res['log_file']:
test_report_fp = "%s_test_report.md" % '.'.join(ec_res['log_file'].split('.')[:-1])
parent_dir = os.path.dirname(test_report_fp)
# parent dir for test report may not be writable at this time, e.g. when --read-only-installdir is used
if os.stat(parent_dir).st_mode & 0o200:
write_file(test_report_fp, test_report_txt['full'])
else:
adjust_permissions(parent_dir, stat.S_IWUSR, add=True, recursive=False)
write_file(test_report_fp, test_report_txt['full'])
adjust_permissions(parent_dir, stat.S_IWUSR, add=False, recursive=False)
if not ec_res['success'] and exit_on_failure:
if 'traceback' in ec_res:
raise EasyBuildError(ec_res['traceback'])
else:
raise EasyBuildError(test_msg)
res.append((ec, ec_res))
update_progress_bar(PROGRESS_BAR_OVERALL, progress_size=1)
stop_progress_bar(PROGRESS_BAR_OVERALL, visible=True)
return res
|
def build_and_install_software(ecs, init_session_state, exit_on_failure=True):
"""
Build and install software for all provided parsed easyconfig files.
:param ecs: easyconfig files to install software with
:param init_session_state: initial session state, to use in test reports
:param exit_on_failure: whether or not to exit on installation failure
"""
# obtain a copy of the starting environment so each build can start afresh
# we shouldn't use the environment from init_session_state, since relevant env vars might have been set since
# e.g. via easyconfig.handle_allowed_system_deps
init_env = copy.deepcopy(os.environ)
start_progress_bar(PROGRESS_BAR_OVERALL, size=len(ecs))
res = []
for ec in ecs:
ec_res = {}
try:
(ec_res['success'], app_log, err) = build_and_install_one(ec, init_env)
ec_res['log_file'] = app_log
if not ec_res['success']:
ec_res['err'] = EasyBuildError(err)
except Exception as err:
# purposely catch all exceptions
ec_res['success'] = False
ec_res['err'] = err
ec_res['traceback'] = traceback.format_exc()
# keep track of success/total count
if ec_res['success']:
test_msg = "Successfully built %s" % ec['spec']
else:
test_msg = "Build of %s failed" % ec['spec']
if 'err' in ec_res:
test_msg += " (err: %s)" % ec_res['err']
# dump test report next to log file
test_report_txt = create_test_report(test_msg, [(ec, ec_res)], init_session_state)
if 'log_file' in ec_res and ec_res['log_file']:
test_report_fp = "%s_test_report.md" % '.'.join(ec_res['log_file'].split('.')[:-1])
parent_dir = os.path.dirname(test_report_fp)
# parent dir for test report may not be writable at this time, e.g. when --read-only-installdir is used
if os.stat(parent_dir).st_mode & 0o200:
write_file(test_report_fp, test_report_txt['full'])
else:
adjust_permissions(parent_dir, stat.S_IWUSR, add=True, recursive=False)
write_file(test_report_fp, test_report_txt['full'])
adjust_permissions(parent_dir, stat.S_IWUSR, add=False, recursive=False)
if not ec_res['success'] and exit_on_failure:
if 'traceback' in ec_res:
raise EasyBuildError(ec_res['traceback'])
else:
raise EasyBuildError(test_msg)
res.append((ec, ec_res))
update_progress_bar(PROGRESS_BAR_OVERALL)
stop_progress_bar(PROGRESS_BAR_OVERALL, visible=True)
return res
|
40,730 |
def test_no_polyaxon_client(no_site_packages):
with pytest.raises(
RuntimeError,
match=r"This contrib module requires polyaxon (polyaxon-client if you are using polyaxon v0.x) to be installed",
):
PolyaxonLogger()
|
def test_no_polyaxon_client(no_site_packages):
with pytest.raises(
RuntimeError,
match=r"This contrib module requires polyaxon",
):
PolyaxonLogger()
|
4,106 |
def p_typecast(s):
# s.sy == "<"
pos = s.position()
s.next()
base_type = p_c_base_type(s)
is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
is_const_volatile = isinstance(base_type, Nodes.CConstOrVolatileTypeNode)
is_ctuple = isinstance(base_type, Nodes.CTupleBaseTypeNode)
if not is_memslice and not is_template and not is_const_volatile and not is_ctuple and base_type.name is None:
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
if s.sy == '?':
s.next()
typecheck = 1
else:
typecheck = 0
s.expect(">")
operand = p_factor(s)
if is_memslice:
return ExprNodes.CythonArrayNode(pos, base_type_node=base_type, operand=operand)
return ExprNodes.TypecastNode(pos,
base_type = base_type,
declarator = declarator,
operand = operand,
typecheck = typecheck)
|
def p_typecast(s):
# s.sy == "<"
pos = s.position()
s.next()
base_type = p_c_base_type(s)
is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
is_const_volatile = isinstance(base_type, Nodes.CConstOrVolatileTypeNode)
is_ctuple = isinstance(base_type, Nodes.CTupleBaseTypeNode)
if not (is_memslice or is_template or is_const_volatile or is_ctuple) and base_type.name is None:
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
if s.sy == '?':
s.next()
typecheck = 1
else:
typecheck = 0
s.expect(">")
operand = p_factor(s)
if is_memslice:
return ExprNodes.CythonArrayNode(pos, base_type_node=base_type, operand=operand)
return ExprNodes.TypecastNode(pos,
base_type = base_type,
declarator = declarator,
operand = operand,
typecheck = typecheck)
|
20,526 |
def get_parser():
# initialize default param
param_default = Param()
# parser initialisation
parser = argparse.ArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"then performs a disc search in the superior, then inferior direction, using template disc matching based "
"on mutual information score. The automatic method uses the module implemented in "
"'spinalcordtoolbox/vertebrae/detect_c2c3.py' to detect the C2-C3 disc.\n"
"Tips: To run the function with init txt file that includes flags -initz/-initcenter:\n"
" sct_label_vertebrae -i t2.nii.gz -s t2_seg-manual.nii.gz '$(< init_label_vertebrae.txt)'"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: t2.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Segmentation of the spinal cord. Example: t2_seg.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2'],
required=True,
help="Type of image contrast. 't2': cord dark / CSF bright. 't1': cord bright / CSF dark"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-t',
metavar=Metavar.folder,
default=os.path.join(sct.__data_dir__, "PAM50"),
help="Path to template."
)
optional.add_argument(
'-initz',
metavar=Metavar.list,
type=list_type(',', int),
help=("R|Initialize using slice number and disc value. Example: 68,4 (slice 68 corresponds to disc C3/C4). "
"Example: 125,3\n"
"WARNING: Slice number should correspond to superior-inferior direction (e.g. Z in RPI orientation, but "
"Y in LIP orientation).")
)
optional.add_argument(
'-initcenter',
metavar=Metavar.int,
type=int,
help=("Initialize using disc value centered in the rostro-caudal direction. If the spine is curved, then "
"consider the disc that projects onto the cord at the center of the z-FOV.")
)
optional.add_argument(
'-initfile',
metavar=Metavar.file,
help="Initialize labeling by providing a text file which includes either -initz or -initcenter flag."
)
optional.add_argument(
'-initlabel',
metavar=Metavar.file,
help=("Initialize vertebral labeling by providing a nifti file that has a single disc label. An example of "
"such file is a single voxel with value '3', which would be located at the posterior tip of C2-C3 disc. "
"Such label file can be created using: sct_label_utils -i IMAGE_REF -create-viewer 3 ; or by using the "
"Python module 'detect_c2c3' implemented in 'spinalcordtoolbox/vertebrae/detect_c2c3.py'.")
)
optional.add_argument(
'-discfile',
metavar=Metavar.file,
help=("File with disc labels, which will be used to transform the input segmentation into a vertebral level "
"file. In that case, there will be no disc detection. The convention for disc labels is the following: "
"value=3 -> disc C2/C3, value=4 -> disc C3/C4, etc.")
)
optional.add_argument(
'-ofolder',
metavar=Metavar.file,
action=ActionCreateFolder,
default='',
help=("Output folder.")
)
optional.add_argument(
'-denoise',
choices=['0', '1'],
default='0',
help="Apply denoising filter to the data. Sometimes denoising is too aggressive, so use with care."
)
optional.add_argument(
'-laplacian',
choices=['0', '1'],
default='0',
help="Apply Laplacian filtering. More accuracy but could mistake disc depending on anatomy."
)
optional.add_argument(
'-scale-dist',
metavar=Metavar.float,
type=float,
default=1.,
help=("Scaling factor to adjust the average distance between two adjacent intervertebral discs. For example, "
"if you are dealing with images from pediatric population, the distance should be reduced, so you can "
"try a scaling factor of about 0.7.")
)
optional.add_argument(
'-param',
metavar=Metavar.list,
type=list_type(',', str),
help=(f"R|Advanced parameters. Assign value with \"=\"; Separate arguments with \",\"\n"
f" - shift_AP [mm]: AP shift of centerline for disc search. Default={param_default.shift_AP}.\n"
f" - size_AP [mm]: AP window size for disc search. Default={param_default.size_AP}.\n"
f" - size_RL [mm]: RL window size for disc search. Default={param_default.size_RL}.\n"
f" - size_IS [mm]: IS window size for disc search. Default={param_default.size_IS}.\n"
f" - gaussian_std [mm]: STD of the Gaussian function, centered at the most rostral point of the "
f"image, and used to weight C2-C3 disk location finding towards the rostral portion of the FOV. Values "
f"to set between 0.1 (strong weighting) and 999 (no weighting). "
f"Default={param_default.gaussian_std}.\n")
)
optional.add_argument(
'-r',
choices=['0', '1'],
default='1',
help="Remove temporary files."
)
optional.add_argument(
'-v',
choices=['0', '1', '2'],
default='1',
help="Verbose. 0: nothing. 1: basic. 2: extended."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
default=param_default.path_qc,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
def get_parser():
# initialize default param
param_default = Param()
# parser initialisation
parser = argparse.ArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"then performs a disc search in the superior, then inferior direction, using template disc matching based "
"on mutual information score. The automatic method uses the module implemented in "
"'spinalcordtoolbox/vertebrae/detect_c2c3.py' to detect the C2-C3 disc.\n"
"Tips: To run the function with init txt file that includes flags -initz/-initcenter:\n"
" sct_label_vertebrae -i t2.nii.gz -s t2_seg-manual.nii.gz '$(< init_label_vertebrae.txt)'"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: t2.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Segmentation of the spinal cord. Example: t2_seg.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2'],
required=True,
help="Type of image contrast. 't2': cord dark / CSF bright. 't1': cord bright / CSF dark"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-t',
metavar=Metavar.folder,
default=os.path.join(sct.__data_dir__, "PAM50"),
help="Path to template."
)
optional.add_argument(
'-initz',
metavar=Metavar.list,
type=list_type(',', int),
help=("R|Initialize using slice number and disc value. Example: 68,4 (slice 68 corresponds to disc C3/C4). "
"Example: 125,3\n"
"WARNING: Slice number should correspond to superior-inferior direction (e.g. Z in RPI orientation, but "
"Y in LIP orientation).")
)
optional.add_argument(
'-initcenter',
metavar=Metavar.int,
type=int,
help=("Initialize using disc value centered in the rostro-caudal direction. If the spine is curved, then "
"consider the disc that projects onto the cord at the center of the z-FOV.")
)
optional.add_argument(
'-initfile',
metavar=Metavar.file,
help="Initialize labeling by providing a text file which includes either -initz or -initcenter flag."
)
optional.add_argument(
'-initlabel',
metavar=Metavar.file,
help=("Initialize vertebral labeling by providing a nifti file that has a single disc label. An example of "
"such file is a single voxel with value '3', which would be located at the posterior tip of C2-C3 disc. "
"Such label file can be created using: sct_label_utils -i IMAGE_REF -create-viewer 3 ; or by using the "
"Python module 'detect_c2c3' implemented in 'spinalcordtoolbox/vertebrae/detect_c2c3.py'.")
)
optional.add_argument(
'-discfile',
metavar=Metavar.file,
help=("File with disc labels, which will be used to transform the input segmentation into a vertebral level "
"file. In that case, there will be no disc detection. The convention for disc labels is the following: "
"value=3 -> disc C2/C3, value=4 -> disc C3/C4, etc.")
)
optional.add_argument(
'-ofolder',
metavar=Metavar.file,
action=ActionCreateFolder,
default='',
help=("Output folder.")
)
optional.add_argument(
'-denoise',
choices=['0', '1'],
default='0',
help="Apply denoising filter to the data. Sometimes denoising is too aggressive, so use with care."
)
optional.add_argument(
'-laplacian',
choices=['0', '1'],
default='0',
help="Apply Laplacian filtering. More accurate but could mistake disc depending on anatomy."
)
optional.add_argument(
'-scale-dist',
metavar=Metavar.float,
type=float,
default=1.,
help=("Scaling factor to adjust the average distance between two adjacent intervertebral discs. For example, "
"if you are dealing with images from pediatric population, the distance should be reduced, so you can "
"try a scaling factor of about 0.7.")
)
optional.add_argument(
'-param',
metavar=Metavar.list,
type=list_type(',', str),
help=(f"R|Advanced parameters. Assign value with \"=\"; Separate arguments with \",\"\n"
f" - shift_AP [mm]: AP shift of centerline for disc search. Default={param_default.shift_AP}.\n"
f" - size_AP [mm]: AP window size for disc search. Default={param_default.size_AP}.\n"
f" - size_RL [mm]: RL window size for disc search. Default={param_default.size_RL}.\n"
f" - size_IS [mm]: IS window size for disc search. Default={param_default.size_IS}.\n"
f" - gaussian_std [mm]: STD of the Gaussian function, centered at the most rostral point of the "
f"image, and used to weight C2-C3 disk location finding towards the rostral portion of the FOV. Values "
f"to set between 0.1 (strong weighting) and 999 (no weighting). "
f"Default={param_default.gaussian_std}.\n")
)
optional.add_argument(
'-r',
choices=['0', '1'],
default='1',
help="Remove temporary files."
)
optional.add_argument(
'-v',
choices=['0', '1', '2'],
default='1',
help="Verbose. 0: nothing. 1: basic. 2: extended."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
default=param_default.path_qc,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
55,087 |
def generate_basis_set(l, alpha, coeff, r):
r"""Generate a set of basis function objects.
Args:
l list((tuple[int])): angular momentum numbers of the basis function.
alpha list((array(float))): exponents of the Gaussian functions forming basis functions
coeff list((array(float))): coefficients of the contracted Gaussian functions
r list((array(float))): positions of the Gaussian functions forming the basis functions
Returns:
list(BasisFunction): list containing a set of basis function objects.
**Example**
>>> l = [(0, 0, 0), (0, 0, 0)]
>>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]]
>>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]]
>>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]]
>>> basis_set = generate_basis_set(l, exponents, coefficients, centers)
>>> print(basis_set)
[<molecule.BasisFunction object at 0x7f7566db2910>, <molecule.BasisFunction object at 0x7f7566db2a30>]
"""
return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))]
|
def generate_basis_set(l, alpha, coeff, r):
r"""Generate a set of basis function objects.
Args:
l list((tuple[int])): angular momentum numbers of the basis function.
alpha list((array(float))): exponents of the Gaussian functions forming basis functions
coeff list((array(float))): coefficients of the contracted Gaussian functions
r list((array(float))): positions of the Gaussian functions forming the basis functions
Returns:
list(BasisFunction): list containing a set of basis function objects.
**Example**
>>> l = [(0, 0, 0), (0, 0, 0)]
>>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]]
>>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]]
>>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]]
>>> basis_set = generate_basis_set(l, exponents, coefficients, centers)
>>> print(basis_set)
[<molecule.BasisFunction object at 0x7f7566db2910>, <molecule.BasisFunction object at 0x7f7566db2a30>]
"""
return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))]
|
31,193 |
def resolve_incidents_command(client, args):
incident_ids = args.get('incident_ids')
result = client.resolve_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.resolve',
outputs_key_field='',
outputs=msg
)
|
def resolve_incidents_command(client, args):
incident_ids = argToList(args.get('incident_ids'))
result = client.resolve_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.resolve',
outputs_key_field='',
outputs=msg
)
|
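The change above routes `incident_ids` through `argToList`, the XSOAR helper that normalises war-room arguments. A hypothetical stand-in approximating that behaviour (string, comma-separated string, or list in; list out), for illustration only:
# Hypothetical approximation of argToList, for illustration; not the real helper.
def arg_to_list(value, separator=','):
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [item.strip() for item in str(value).split(separator)]

print(arg_to_list('1001,1002'))  # ['1001', '1002']
print(arg_to_list(['1003']))     # ['1003']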
31,156 |
def transform_object_list(object_type: str, object_list=None):
"""
    Transform list objects, i.e. replace the SCIM URI with a compressed object name.
This is done as PAN XSOAR is unable to process json keys with symbols like - '.' or ':'.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type object_list: ``JSON``
:param object_list: List of Identity resources objects.
:return: Transformed list object.
"""
if object_list is None:
return None
transformed_list = []
for object in object_list:
transformed_list.append(transform_object(object_type, object))
return transformed_list
|
def transform_object_list(object_type: str, object_list=None):
"""
    Transform list objects, i.e. replace the SCIM URI with a compressed object name.
This is done as PAN XSOAR is unable to process json keys with symbols like - '.' or ':'.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type object_list: ``list``
:param object_list: List of Identity resources objects.
:return: Transformed list object.
"""
if object_list is None:
return None
transformed_list = []
for object in object_list:
transformed_list.append(transform_object(object_type, object))
return transformed_list
|
58,528 |
def _check_tensor_lists_input(tensor_lists):
"""Check if the input is a list of lists of supported tensor types."""
if not isinstance(tensor_lists, list):
raise RuntimeError("The input must be a list of lists of tensors. "
"Got '{}'.".format(type(tensor_lists)))
if not tensor_lists:
raise RuntimeError("Got an empty list of lists of tensors.")
for t in tensor_lists:
_check_tensor_list_input(t)
|
def _check_tensor_lists_input(tensor_lists):
"""Check if the input is a list of lists of supported tensor types."""
if not isinstance(tensor_lists, list):
raise RuntimeError("The input must be a list of lists of tensors. "
"Got '{}'.".format(type(tensor_lists)))
if not tensor_lists:
raise RuntimeError(f"Did not receive tensors. Got: {tensor_lists}")
for t in tensor_lists:
_check_tensor_list_input(t)
|
21,941 |
def singular_values_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
*args, **kwargs):
"""Singular value plot for a system
    Plots the singular values of the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear systems (single system is OK)
omega : array_like
List of frequencies in rad/sec to be used for frequency response
plot : bool
If True (default), plot magnitude and phase
omega_limits : array_like of two values
Limits of the to generate frequency vector.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
Returns
-------
sigma : ndarray (or list of ndarray if len(syslist) > 1))
singular values
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['bode.grid']`.
Examples
--------
>>> den = [75, 1]
>>> sys = ct.tf([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
>>> omega = np.logspace(-4, 1, 1000)
>>> sigma, omega = singular_values_plot(sys)
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True)
Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)
grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True)
plot = config._get_param('bode', 'grid', plot, True)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Decide whether to go above Nyquist frequency
omega_range_given = True if omega is not None else False
if omega is None:
omega_num = config._get_param(
'freqplot', 'number_of_samples', omega_num)
if omega_limits is None:
# Select a default range if none is provided
omega = _default_frequency_range(syslist, number_of_samples=omega_num)
else:
omega_range_given = True
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
if Hz:
omega_limits *= 2. * math.pi
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=omega_num,
endpoint=True)
if plot:
fig = plt.gcf()
ax_sigma = None
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-sigma':
ax_sigma = ax
# If no axes present, create them from scratch
if ax_sigma is None:
plt.clf()
ax_sigma = plt.subplot(111, label='control-sigma')
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
sigmas, omegas, nyquistfrqs = [], [], []
for idx_sys, sys in enumerate(syslist):
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
else:
nyquistfrq = None
mag, phase, omega = sys.frequency_response(omega)
fresp = mag * np.exp(1j * phase)
#fresp = evalfr(sys, 1j * omega_sys)
fresp = fresp.transpose((2, 0, 1))
sigma = np.linalg.svd(fresp, compute_uv=False)
sigmas.append(sigma)
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
if plot:
color = color_cycle[idx_sys % len(color_cycle)]
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
sigma_plot = sigma
if dB:
ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot), color=color, *args, **kwargs)
else:
ax_sigma.loglog(omega_plot, sigma_plot, color=color, *args, **kwargs)
if nyquistfrq_plot is not None:
ax_sigma.axvline(x=nyquistfrq_plot, color=color)
# Add a grid to the plot + labeling
ax_sigma.grid(grid, which='both')
ax_sigma.set_ylabel("Magnitude (dB)" if dB else "Magnitude")
ax_sigma.set_xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
if len(syslist) == 1:
return sigmas[0], omegas[0]
else:
return sigmas, omegas
#
|
def singular_values_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
*args, **kwargs):
"""Singular value plot for a system
    Plots the singular values of the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear systems (single system is OK)
omega : array_like
List of frequencies in rad/sec to be used for frequency response
plot : bool
If True (default), plot magnitude and phase
omega_limits : array_like of two values
Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
Returns
-------
sigma : ndarray (or list of ndarray if len(syslist) > 1))
singular values
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['bode.grid']`.
Examples
--------
>>> den = [75, 1]
>>> sys = ct.tf([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
>>> omega = np.logspace(-4, 1, 1000)
>>> sigma, omega = singular_values_plot(sys)
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True)
Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)
grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True)
plot = config._get_param('bode', 'grid', plot, True)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Decide whether to go above Nyquist frequency
omega_range_given = True if omega is not None else False
if omega is None:
omega_num = config._get_param(
'freqplot', 'number_of_samples', omega_num)
if omega_limits is None:
# Select a default range if none is provided
omega = _default_frequency_range(syslist, number_of_samples=omega_num)
else:
omega_range_given = True
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
if Hz:
omega_limits *= 2. * math.pi
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=omega_num,
endpoint=True)
if plot:
fig = plt.gcf()
ax_sigma = None
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-sigma':
ax_sigma = ax
# If no axes present, create them from scratch
if ax_sigma is None:
plt.clf()
ax_sigma = plt.subplot(111, label='control-sigma')
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
sigmas, omegas, nyquistfrqs = [], [], []
for idx_sys, sys in enumerate(syslist):
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
else:
nyquistfrq = None
mag, phase, omega = sys.frequency_response(omega)
fresp = mag * np.exp(1j * phase)
#fresp = evalfr(sys, 1j * omega_sys)
fresp = fresp.transpose((2, 0, 1))
sigma = np.linalg.svd(fresp, compute_uv=False)
sigmas.append(sigma)
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
if plot:
color = color_cycle[idx_sys % len(color_cycle)]
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
sigma_plot = sigma
if dB:
ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot), color=color, *args, **kwargs)
else:
ax_sigma.loglog(omega_plot, sigma_plot, color=color, *args, **kwargs)
if nyquistfrq_plot is not None:
ax_sigma.axvline(x=nyquistfrq_plot, color=color)
# Add a grid to the plot + labeling
ax_sigma.grid(grid, which='both')
ax_sigma.set_ylabel("Magnitude (dB)" if dB else "Magnitude")
ax_sigma.set_xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
if len(syslist) == 1:
return sigmas[0], omegas[0]
else:
return sigmas, omegas
#
|
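The core of the computation above is a batched SVD over the frequency axis; a toy sketch with a random 2-input, 2-output response sampled at three frequencies:
import numpy as np

rng = np.random.default_rng(0)
# Shape (n_freq, n_outputs, n_inputs), matching fresp.transpose((2, 0, 1)) above.
fresp = rng.normal(size=(3, 2, 2)) + 1j * rng.normal(size=(3, 2, 2))
sigma = np.linalg.svd(fresp, compute_uv=False)
print(sigma.shape)  # (3, 2): the singular values at each frequency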
14,136 |
def plot_series(
s, cmap=None, color=None, ax=None, figsize=None, aspect="auto", **style_kwds
):
"""
Plot a GeoSeries.
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
s : Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString, Point and MultiPoint
geometries can be plotted.
cmap : str (default None)
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
color : str, np.array, pd.Series, List (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
figsize : pair of floats (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
ax is given explicitly, figsize is ignored.
aspect : 'auto', 'equal', None or float (default 'auto')
Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if
however data are not projected (coordinates are long/lat), the aspect is by
default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of
the GeoSeries (the mean of the y range of bounding box) so that a long/lat
square appears square in the middle of the plot. This implies an
Equirectangular projection. If None, the aspect of `ax` won't be changed. It can
also be set manually (float) as the ratio of y-unit to x-unit.
**style_kwds : dict
Color options to be passed on to the actual plot function, such
as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
``alpha``.
Returns
-------
ax : matplotlib axes instance
"""
if "colormap" in style_kwds:
warnings.warn(
"'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)",
FutureWarning,
)
cmap = style_kwds.pop("colormap")
if "axes" in style_kwds:
warnings.warn(
"'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)",
FutureWarning,
)
ax = style_kwds.pop("axes")
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"The matplotlib package is required for plotting in geopandas. "
"You can install it using 'conda install -c conda-forge matplotlib' or "
"'pip install matplotlib'."
)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if aspect == "auto":
if s.crs and s.crs.is_geographic:
bounds = s.total_bounds
y_coord = np.mean([bounds[1], bounds[3]])
ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))
# formula ported from R package sp
# https://github.com/edzer/sp/blob/master/R/mapasp.R
else:
ax.set_aspect("equal")
elif aspect is not None:
ax.set_aspect(aspect)
if s.empty:
warnings.warn(
"The GeoSeries you are attempting to plot is "
"empty. Nothing has been displayed.",
UserWarning,
)
return ax
if s.is_empty.all():
warnings.warn(
"The GeoSeries you are attempting to plot is "
"composed of empty geometries. Nothing has been displayed.",
UserWarning,
)
return ax
# have colors been given for all geometries?
color_given = isinstance(color, (list, pd.core.series.Series, np.ndarray)) and len(
color
) == len(s)
# if cmap is specified, create range of colors based on cmap
values = None
if cmap is not None:
values = np.arange(len(s))
if hasattr(cmap, "N"):
values = values % cmap.N
style_kwds["vmin"] = style_kwds.get("vmin", values.min())
style_kwds["vmax"] = style_kwds.get("vmax", values.max())
# decompose GeometryCollections
geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix="Geom")
values = np.take(values, multiindex, axis=0) if cmap else None
# ensure indexes are consistent
if color_given and isinstance(color, pd.Series):
color = color.reindex(s.index)
expl_color = np.take(color, multiindex, axis=0) if color_given else color
expl_series = geopandas.GeoSeries(geoms)
geom_types = expl_series.type
poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
line_idx = np.asarray(
(geom_types == "LineString")
| (geom_types == "MultiLineString")
| (geom_types == "LinearRing")
)
point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
# plot all Polygons and all MultiPolygon components in the same collection
polys = expl_series[poly_idx]
if not polys.empty:
# color overrides both face and edgecolor. As we want people to be
# able to use edgecolor as well, pass color to facecolor
facecolor = style_kwds.pop("facecolor", None)
color_ = expl_color[poly_idx] if color_given else color
if color is not None:
facecolor = color_
values_ = values[poly_idx] if cmap else None
_plot_polygon_collection(
ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds
)
# plot all LineStrings and MultiLineString components in same collection
lines = expl_series[line_idx]
if not lines.empty:
values_ = values[line_idx] if cmap else None
color_ = expl_color[line_idx] if color_given else color
_plot_linestring_collection(
ax, lines, values_, color=color_, cmap=cmap, **style_kwds
)
# plot all Points in the same collection
points = expl_series[point_idx]
if not points.empty:
values_ = values[point_idx] if cmap else None
color_ = expl_color[point_idx] if color_given else color
_plot_point_collection(
ax, points, values_, color=color_, cmap=cmap, **style_kwds
)
plt.draw()
return ax
|
def plot_series(
s, cmap=None, color=None, ax=None, figsize=None, aspect="auto", **style_kwds
):
"""
Plot a GeoSeries.
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
s : Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString, Point and MultiPoint
geometries can be plotted.
cmap : str (default None)
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
color : str, np.array, pd.Series, List (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
figsize : pair of floats (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
ax is given explicitly, figsize is ignored.
aspect : 'auto', 'equal', None or float (default 'auto')
Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if
however data are not projected (coordinates are long/lat), the aspect is by
default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of
the GeoSeries (the mean of the y range of bounding box) so that a long/lat
square appears square in the middle of the plot. This implies an
Equirectangular projection. If None, the aspect of `ax` won't be changed. It can
also be set manually (float) as the ratio of y-unit to x-unit.
**style_kwds : dict
Color options to be passed on to the actual plot function, such
as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
``alpha``.
Returns
-------
ax : matplotlib axes instance
"""
if "colormap" in style_kwds:
warnings.warn(
"'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)",
FutureWarning,
)
cmap = style_kwds.pop("colormap")
if "axes" in style_kwds:
warnings.warn(
"'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)",
FutureWarning,
)
ax = style_kwds.pop("axes")
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"The matplotlib package is required for plotting in geopandas. "
"You can install it using 'conda install -c conda-forge matplotlib' or "
"'pip install matplotlib'."
)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if aspect == "auto":
if s.crs and s.crs.is_geographic:
bounds = s.total_bounds
y_coord = np.mean([bounds[1], bounds[3]])
ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))
# formula ported from R package sp
# https://github.com/edzer/sp/blob/master/R/mapasp.R
else:
ax.set_aspect("equal")
elif aspect is not None:
ax.set_aspect(aspect)
if s.empty:
warnings.warn(
"The GeoSeries you are attempting to plot is "
"empty. Nothing has been displayed.",
UserWarning,
)
return ax
if s.is_empty.all():
warnings.warn(
"The GeoSeries you are attempting to plot is "
"composed of empty geometries. Nothing has been displayed.",
UserWarning,
)
return ax
# have colors been given for all geometries?
color_given = pd.api.types.is_list_like(color) and len(
color
) == len(s)
# if cmap is specified, create range of colors based on cmap
values = None
if cmap is not None:
values = np.arange(len(s))
if hasattr(cmap, "N"):
values = values % cmap.N
style_kwds["vmin"] = style_kwds.get("vmin", values.min())
style_kwds["vmax"] = style_kwds.get("vmax", values.max())
# decompose GeometryCollections
geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix="Geom")
values = np.take(values, multiindex, axis=0) if cmap else None
# ensure indexes are consistent
if color_given and isinstance(color, pd.Series):
color = color.reindex(s.index)
expl_color = np.take(color, multiindex, axis=0) if color_given else color
expl_series = geopandas.GeoSeries(geoms)
geom_types = expl_series.type
poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
line_idx = np.asarray(
(geom_types == "LineString")
| (geom_types == "MultiLineString")
| (geom_types == "LinearRing")
)
point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
# plot all Polygons and all MultiPolygon components in the same collection
polys = expl_series[poly_idx]
if not polys.empty:
# color overrides both face and edgecolor. As we want people to be
# able to use edgecolor as well, pass color to facecolor
facecolor = style_kwds.pop("facecolor", None)
color_ = expl_color[poly_idx] if color_given else color
if color is not None:
facecolor = color_
values_ = values[poly_idx] if cmap else None
_plot_polygon_collection(
ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds
)
# plot all LineStrings and MultiLineString components in same collection
lines = expl_series[line_idx]
if not lines.empty:
values_ = values[line_idx] if cmap else None
color_ = expl_color[line_idx] if color_given else color
_plot_linestring_collection(
ax, lines, values_, color=color_, cmap=cmap, **style_kwds
)
# plot all Points in the same collection
points = expl_series[point_idx]
if not points.empty:
values_ = values[point_idx] if cmap else None
color_ = expl_color[point_idx] if color_given else color
_plot_point_collection(
ax, points, values_, color=color_, cmap=cmap, **style_kwds
)
plt.draw()
return ax
|
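The only change in this pair is the `color_given` test; a short sketch of why `pd.api.types.is_list_like` is the broader check (it accepts tuples and other sequences while still treating a single color string as a scalar):
import numpy as np
import pandas as pd

colors = ("red", "green", "blue")
print(isinstance(colors, (list, pd.core.series.Series, np.ndarray)))  # False: tuples are missed
print(pd.api.types.is_list_like(colors))                              # True
print(pd.api.types.is_list_like("red"))                               # False: a single color string stays scalar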
12,277 |
def print_qasm(qc):
'''
Print QASM output of circuit object.
Parameters
----------
qc: QubitCircuit
circuit object to produce QASM output for.
'''
qasm_out = QasmOutput("2.0")
lines = qasm_out._qasm_output(qc)
for line in lines:
print(line)
|
def print_qasm(qc):
'''
Print QASM output of circuit object.
Parameters
----------
qc: :class:`.QubitCircuit`
circuit object to produce QASM output for.
'''
qasm_out = QasmOutput("2.0")
lines = qasm_out._qasm_output(qc)
for line in lines:
print(line)
|
17,487 |
def test_weighted_quantile_with_2d_q():
da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5])
q = np.array([0.2, 0.4, 0.6, 0.8]).reshape(2, 2)
weights = DataArray(np.ones(len(da)))
with pytest.raises(ValueError):
da.weighted(weights).quantile(q)
|
def test_weighted_quantile_with_2d_q():
da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5])
q = np.array([0.2, 0.4, 0.6, 0.8]).reshape(2, 2)
weights = xr.ones_like(da)
with pytest.raises(ValueError):
da.weighted(weights).quantile(q)
|
26,211 |
def _dp_poll(test, device_number=0, port_number=None, timeout=-1, exp_pkt=None):
update_payload = getattr(test, "update_payload", None)
if update_payload and callable(update_payload):
exp_pkt = test.update_payload(exp_pkt)
result = test.dataplane.poll(
device_number=device_number, port_number=port_number,
timeout=timeout, exp_pkt=exp_pkt, filters=[]
)
if isinstance(result, test.dataplane.PollSuccess):
test.at_receive(result.packet, device_number=result.device, port_number=result.port)
return result
setattr(ptf.testutils, "dp_poll", _dp_poll)
|
def _dp_poll(test, device_number=0, port_number=None, timeout=-1, exp_pkt=None):
update_payload = getattr(test, "update_payload", None)
if update_payload and callable(update_payload):
exp_pkt = test.update_payload(exp_pkt)
result = test.dataplane.poll(
device_number=device_number, port_number=port_number,
timeout=timeout, exp_pkt=exp_pkt, filters=FILTERS
)
if isinstance(result, test.dataplane.PollSuccess):
test.at_receive(result.packet, device_number=result.device, port_number=result.port)
return result
setattr(ptf.testutils, "dp_poll", _dp_poll)
|
38,321 |
def nbody_answer(ds, ds_str_repr, ds_nparticles, fields):
if not can_run_ds(ds):
return
assert_equal(str(ds), ds_str_repr)
dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
dd = ds.all_data()
assert_equal(dd["particle_position"].shape, (ds_nparticles, 3))
tot = sum(dd[ptype, "particle_position"].shape[0]
for ptype in ds.particle_types_raw)
assert_equal(tot, ds_nparticles)
for dobj_name in dso:
for field, weight_field in fields.items():
if field[0] in ds.particle_types:
particle_type = True
else:
particle_type = False
for axis in [0, 1, 2]:
if not particle_type:
yield PixelizedParticleProjectionValuesTest(
ds, axis, field, weight_field,
dobj_name)
yield FieldValuesTest(ds, field, dobj_name,
particle_type=particle_type)
|
def nbody_answer(ds, ds_str_repr, ds_nparticles, fields):
if not can_run_ds(ds):
return
assert_equal(str(ds), ds_str_repr)
dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
dd = ds.all_data()
assert_equal(dd["particle_position"].shape, (ds_nparticles, 3))
tot = sum(dd[ptype, "particle_position"].shape[0]
for ptype in ds.particle_types_raw)
assert_equal(tot, ds_nparticles)
for dobj_name in dso:
for field, weight_field in fields.items():
particle_type = field[0] in ds.particle_types
for axis in [0, 1, 2]:
if not particle_type:
yield PixelizedParticleProjectionValuesTest(
ds, axis, field, weight_field,
dobj_name)
yield FieldValuesTest(ds, field, dobj_name,
particle_type=particle_type)
|
35 |
def get_cached_homepage():
five_minutes = 5 * dateutil.MINUTE_SECS
lang = web.ctx.get("lang") or "en"
return cache.memcache_memoize(
get_homepage, "home.homepage." + lang, timeout=five_minutes)()
|
def get_cached_homepage():
five_minutes = 5 * dateutil.MINUTE_SECS
lang = web.ctx.get("lang", "en")
return cache.memcache_memoize(
get_homepage, "home.homepage." + lang, timeout=five_minutes)()
|
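The two versions above are not equivalent when the key is present but falsy; a minimal illustration:
ctx = {"lang": ""}               # key present, value falsy
print(ctx.get("lang") or "en")   # 'en' (original: falls back)
print(ctx.get("lang", "en"))     # ''   (modified: keeps the empty string)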
40,633 |
def simple_download(url: str, destination: [Path, str]):
CHUNK = 32 * 1024
destination = Path(destination)
destination.parent.mkdir(parents=True, exist_ok=True)
temporary = destination.with_suffix(".part")
headers = {'dp-token': get_download_token()}
r = requests.get(url, stream=True, headers=headers)
total_length = int(r.headers.get('content-length', 0))
log.info('Downloading from {} to {}'.format(url, destination))
with temporary.open('ab') as f:
downloaded = f.tell()
if downloaded != 0:
log.warn(f'Found a partial download {temporary}')
with tqdm(initial=downloaded, total=total_length, unit='B', unit_scale=True) as pbar:
while downloaded < total_length:
if downloaded != 0:
log.warn(f'Download stopped abruptly, trying to resume from {downloaded} to reach {total_length}')
headers['Range'] = f'bytes={downloaded}-'
r = requests.get(url, headers=headers, stream=True)
if total_length - downloaded != int(r.headers['content-length']):
raise RuntimeError('It looks like the server does not support resuming downloads')
for chunk in r.iter_content(chunk_size=CHUNK):
if chunk: # filter out keep-alive new chunks
downloaded += len(chunk)
pbar.update(len(chunk))
f.write(chunk)
temporary.rename(destination)
|
def simple_download(url: str, destination: [Path, str]):
CHUNK = 32 * 1024
destination = Path(destination)
destination.parent.mkdir(parents=True, exist_ok=True)
temporary = destination.with_suffix(destination.suffix + '.part')
headers = {'dp-token': get_download_token()}
r = requests.get(url, stream=True, headers=headers)
total_length = int(r.headers.get('content-length', 0))
log.info('Downloading from {} to {}'.format(url, destination))
with temporary.open('ab') as f:
downloaded = f.tell()
if downloaded != 0:
log.warn(f'Found a partial download {temporary}')
with tqdm(initial=downloaded, total=total_length, unit='B', unit_scale=True) as pbar:
while downloaded < total_length:
if downloaded != 0:
log.warn(f'Download stopped abruptly, trying to resume from {downloaded} to reach {total_length}')
headers['Range'] = f'bytes={downloaded}-'
r = requests.get(url, headers=headers, stream=True)
if total_length - downloaded != int(r.headers['content-length']):
raise RuntimeError('It looks like the server does not support resuming downloads')
for chunk in r.iter_content(chunk_size=CHUNK):
if chunk: # filter out keep-alive new chunks
downloaded += len(chunk)
pbar.update(len(chunk))
f.write(chunk)
temporary.rename(destination)
|
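The renaming of the temporary file above hinges on pathlib semantics: Path.with_suffix() replaces the last suffix rather than appending to it, so destination.with_suffix('.part') silently drops the real extension (and would collide for files that differ only in extension), while appending to destination.suffix keeps the original name intact. A short demonstration:
from pathlib import Path
destination = Path("downloads/model.bin")
print(destination.with_suffix(".part"))                        # downloads/model.part
print(destination.with_suffix(destination.suffix + ".part"))   # downloads/model.bin.part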
5,709 |
def _correa_entropy(X, m):
"""Compute the Correa estimator as described in [6]"""
# No equation number, but referred to as HC_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
i = np.arange(1, n+1)
dj = np.arange(-m, m+1)[:, None]
j = i + dj
j0 = j + m - 1 # 0-indexed version of j
Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
num = np.sum((X[..., j0] - Xibar)*(j-i), axis=-2)
den = n*np.sum((X[..., j0] - Xibar)**2, axis=-2)
return -np.mean(np.log(num/den), axis=-1)
|
def _correa_entropy(X, m):
"""Compute the Correa estimator as described in [6]."""
# No equation number, but referred to as HC_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
i = np.arange(1, n+1)
dj = np.arange(-m, m+1)[:, None]
j = i + dj
j0 = j + m - 1 # 0-indexed version of j
Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
num = np.sum((X[..., j0] - Xibar)*(j-i), axis=-2)
den = n*np.sum((X[..., j0] - Xibar)**2, axis=-2)
return -np.mean(np.log(num/den), axis=-1)
|
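For context, this private helper implements one of several spacing-based differential entropy estimators. Assuming this is the SciPy implementation (which the HC_mn naming and the [6] reference suggest), the Correa method is reachable through the public scipy.stats.differential_entropy API in recent SciPy releases:
import numpy as np
from scipy import stats
rng = np.random.default_rng(0)
values = rng.standard_normal(1000)
# The true differential entropy of a standard normal is 0.5*log(2*pi*e) ~= 1.4189
print(stats.differential_entropy(values, method="correa"))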
59,674 |
def build_arg_string(kwargs):
"""
Transform keyword arguments into a GMT argument string.
Make sure all arguments have been previously converted to a string
representation using the ``kwargs_to_strings`` decorator. The only
exceptions are True, False and None.
Any lists or tuples left will be interpreted as multiple entries for the
same command line argument. For example, the kwargs entry ``'B': ['xa',
'yaf']`` will be converted to ``-Bxa -Byaf`` in the argument string.
Parameters
----------
kwargs : dict
Parsed keyword arguments.
Returns
-------
args : str
The space-delimited argument string with '-' inserted before each
keyword. The arguments are sorted alphabetically.
Examples
--------
>>> print(
... build_arg_string(
... dict(
... R="1/2/3/4",
... J="X4i",
... P="",
... E=200,
... X=None,
... Y=None,
... A=True,
... B=False,
... Z=0,
... )
... )
... )
-A -E200 -JX4i -P -R1/2/3/4 -Z0
>>> print(
... build_arg_string(
... dict(
... R="1/2/3/4",
... J="X4i",
... B=["xaf", "yaf", "WSen"],
... I=("1/1p,blue", "2/0.25p,blue"),
... )
... )
... )
-Bxaf -Byaf -BWSen -I1/1p,blue -I2/0.25p,blue -JX4i -R1/2/3/4
"""
sorted_args = []
for key in sorted(kwargs):
if is_nonstr_iter(kwargs[key]):
for value in kwargs[key]:
sorted_args.append(f"-{key}{value}")
elif kwargs[key] is None or kwargs[key] is False: # Skip None and False
continue
elif kwargs[key] is True:
sorted_args.append(f"-{key}")
else:
sorted_args.append(f"-{key}{kwargs[key]}")
return " ".join(sorted_args)
|
def build_arg_string(kwargs):
"""
Transform keyword arguments into a GMT argument string.
Make sure all arguments have been previously converted to a string
representation using the ``kwargs_to_strings`` decorator. The only
exceptions are True, False and None.
Any lists or tuples left will be interpreted as multiple entries for the
same command line argument. For example, the kwargs entry ``'B': ['xa',
'yaf']`` will be converted to ``-Bxa -Byaf`` in the argument string.
Parameters
----------
kwargs : dict
Parsed keyword arguments.
Returns
-------
args : str
The space-delimited argument string with '-' inserted before each
keyword. The arguments are sorted alphabetically.
Examples
--------
>>> print(
... build_arg_string(
... dict(
... A=True,
... B=False,
... E=200,
... J="X4c",
... P="",
... R="1/2/3/4",
... X=None,
... Y=None,
... Z=0,
... )
... )
... )
    -A -E200 -JX4c -P -R1/2/3/4 -Z0
>>> print(
... build_arg_string(
... dict(
... R="1/2/3/4",
... J="X4i",
... B=["xaf", "yaf", "WSen"],
... I=("1/1p,blue", "2/0.25p,blue"),
... )
... )
... )
-Bxaf -Byaf -BWSen -I1/1p,blue -I2/0.25p,blue -JX4i -R1/2/3/4
"""
sorted_args = []
for key in sorted(kwargs):
if is_nonstr_iter(kwargs[key]):
for value in kwargs[key]:
sorted_args.append(f"-{key}{value}")
elif kwargs[key] is None or kwargs[key] is False: # Skip None and False
continue
elif kwargs[key] is True:
sorted_args.append(f"-{key}")
else:
sorted_args.append(f"-{key}{kwargs[key]}")
return " ".join(sorted_args)
|
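One subtlety shared by both versions: the skip condition uses identity checks (`is None`, `is False`) rather than truthiness, which is why Z=0 survives and still produces -Z0 in the first doctest while B=False and X=None are dropped. A two-line illustration of the distinction:
Z = 0
print(Z is False)   # False -> the identity check keeps -Z0 in the output
print(bool(Z))      # False -> a truthiness check would have dropped it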
42,391 |
def test_colorbar_raises_value_error():
"""Test that a non matbplotlib axis object raises an value error"""
with pytest.raises(AttributeError):
es.colorbar(list())
|
def test_colorbar_raises_value_error():
"""Test that a non matbplotlib axis object raises an value error"""
with pytest.raises(AttributeError, match="requires a matplotlib"):
es.colorbar(list())
|
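The modified test tightens pytest.raises by adding match=, which applies re.search to the string form of the raised exception, so the test now also asserts on the error message. A minimal sketch of the same pattern with a hypothetical colorbar helper:
import pytest
def colorbar(mappable):
    if not hasattr(mappable, "colorbar"):
        raise AttributeError("colorbar requires a matplotlib mappable object")
def test_colorbar_rejects_plain_lists():
    with pytest.raises(AttributeError, match="requires a matplotlib"):
        colorbar(list())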
32,252 |
def create_test_file(is_nightly, skip_save=False, path_to_pack='', marketplace_version='xsoar', service_account=None):
"""Create a file containing all the tests we need to run for the CI"""
if is_nightly:
if marketplace_version == 'marketplacev2':
            # we are adding to the marketplacev2 nightly a few tests that are supported in both marketplaces
# see https://github.com/demisto/etc/issues/44350
tests = set(CONF.get_marketplacev2_tests())
# collect tests that are compatible only with marketplacev2
tests.update(get_test_playbooks_for_specific_marketplace(ID_SET, marketplace_version))
packs_to_install = CONF.get_packs_of_tested_integrations(tests, ID_SET)
packs_to_install.update(get_content_pack_name_of_test(tests, ID_SET))
# collect all packs and tests that are compatible only with marketplacev2
packs_to_install.update(get_all_packs_with_artifacts_for_specific_marketplace(ID_SET, marketplace_version))
else:
packs_to_install = (set(os.listdir(constants.PACKS_DIR)))
tests = set(CONF.get_test_playbook_ids())
packs_to_install = filter_installed_packs(packs_to_install, marketplace_version,
deepcopy(ID_SET))
tests = filter_tests(tests, id_set=deepcopy(ID_SET), is_nightly=True,
modified_packs=set(), marketplace_version=marketplace_version)
logging.info("Nightly - collected all tests that appear in conf.json and all packs from content repo that "
"should be tested")
else:
branches = tools.run_command("git branch")
branch_name_reg = re.search(r"\* (.*)", branches)
if branch_name_reg:
branch_name = branch_name_reg.group(1)
logging.info("Getting changed files from the branch: {0}".format(branch_name))
if path_to_pack:
changed_files = get_list_of_files_in_the_pack(path_to_pack)
files_string = changed_files_to_string(changed_files)
elif os.environ.get("IFRA_ENV_TYPE") == 'Bucket-Upload':
last_commit = get_last_commit_from_index(service_account)
second_last_commit = branch_name if branch_name != 'master' else 'origin/master'
files_string = tools.run_command(f'git diff --name-status {second_last_commit}..{last_commit}')
logging.debug(f'Current commit: {second_last_commit}, Last upload commit: {last_commit}')
elif branch_name != 'master':
files_string = tools.run_command("git diff --name-status origin/master...{0}".format(branch_name))
            # Check if the build is for a contributor PR and, if so, add its pack.
if os.getenv('CONTRIB_BRANCH'):
packs_diff = tools.run_command('git status -uall --porcelain -- Packs').replace('??', 'A')
files_string = '\n'.join([files_string, packs_diff])
else:
commit_string = tools.run_command("git log -n 2 --pretty='%H'")
logging.debug(f'commit string: {commit_string}')
commit_string = commit_string.replace("'", "")
last_commit, second_last_commit = commit_string.split()
files_string = tools.run_command(f'git diff --name-status {second_last_commit}...{last_commit}')
logging.debug(f'Files string: {files_string}')
tests, packs_to_install = get_test_list_and_content_packs_to_install(files_string, branch_name,
marketplace_version)
tests_string = '\n'.join(tests)
packs_to_install_string = '\n'.join(packs_to_install)
if not skip_save:
logging.info("Creating filter_file.txt")
with open(os.path.join(ARTIFACTS_FOLDER, 'filter_file.txt'), 'w') as filter_file:
filter_file.write(tests_string)
# content_packs_to_install.txt is not used in nightly build
logging.info("Creating content_packs_to_install.txt")
with open(os.path.join(ARTIFACTS_FOLDER, 'content_packs_to_install.txt'), 'w') as content_packs_to_install:
content_packs_to_install.write(packs_to_install_string)
if is_nightly:
logging.debug('Collected the following tests:\n{0}\n'.format(tests_string))
logging.debug('Collected the following packs to install:\n{0}\n'.format('\n'.join(packs_to_install)))
else:
if tests_string:
success_msg = 'Collected the following tests:\n{0}\n'.format(tests_string)
logging.success(success_msg)
else:
logging.error('Did not find tests to run')
if packs_to_install_string:
success_msg = 'Collected the following content packs to install:\n{0}\n'.format(packs_to_install_string)
logging.success(success_msg)
else:
logging.error('Did not find content packs to install')
|
def create_test_file(is_nightly, skip_save=False, path_to_pack='', marketplace_version='xsoar', service_account=None):
"""Create a file containing all the tests we need to run for the CI"""
if is_nightly:
if marketplace_version == 'marketplacev2':
            # we are adding to the marketplacev2 nightly a few tests that are supported in both marketplaces
# see https://github.com/demisto/etc/issues/44350
tests = set(CONF.get_marketplacev2_tests())
# collect tests that are compatible only with marketplacev2
tests.update(get_test_playbooks_for_specific_marketplace(ID_SET, marketplace_version))
packs_to_install = CONF.get_packs_of_tested_integrations(tests, ID_SET)
packs_to_install.update(get_content_pack_name_of_test(tests, ID_SET))
# collect all packs and tests that are compatible only with marketplacev2
packs_to_install.update(get_all_packs_with_artifacts_for_specific_marketplace(ID_SET, marketplace_version))
else:
packs_to_install = (set(os.listdir(constants.PACKS_DIR)))
tests = set(CONF.get_test_playbook_ids())
packs_to_install = filter_installed_packs(packs_to_install, marketplace_version,
deepcopy(ID_SET))
tests = filter_tests(tests, id_set=deepcopy(ID_SET), is_nightly=True,
modified_packs=set(), marketplace_version=marketplace_version)
logging.info("Nightly - collected all tests that appear in conf.json and all packs from content repo that "
"should be tested")
else:
branches = tools.run_command("git branch")
branch_name_reg = re.search(r"\* (.*)", branches)
if branch_name_reg:
branch_name = branch_name_reg.group(1)
logging.info("Getting changed files from the branch: {0}".format(branch_name))
if path_to_pack:
changed_files = get_list_of_files_in_the_pack(path_to_pack)
files_string = changed_files_to_string(changed_files)
elif os.environ.get("IFRA_ENV_TYPE") == 'Bucket-Upload':
last_commit = get_last_commit_from_index(service_account)
current_commit = branch_name if branch_name != 'master' else 'origin/master'
            files_string = tools.run_command(f'git diff --name-status {current_commit}..{last_commit}')
            logging.debug(f'Current commit: {current_commit}, Last upload commit: {last_commit}')
elif branch_name != 'master':
files_string = tools.run_command("git diff --name-status origin/master...{0}".format(branch_name))
            # Check if the build is for a contributor PR and, if so, add its pack.
if os.getenv('CONTRIB_BRANCH'):
packs_diff = tools.run_command('git status -uall --porcelain -- Packs').replace('??', 'A')
files_string = '\n'.join([files_string, packs_diff])
else:
commit_string = tools.run_command("git log -n 2 --pretty='%H'")
logging.debug(f'commit string: {commit_string}')
commit_string = commit_string.replace("'", "")
last_commit, second_last_commit = commit_string.split()
files_string = tools.run_command(f'git diff --name-status {second_last_commit}...{last_commit}')
logging.debug(f'Files string: {files_string}')
tests, packs_to_install = get_test_list_and_content_packs_to_install(files_string, branch_name,
marketplace_version)
tests_string = '\n'.join(tests)
packs_to_install_string = '\n'.join(packs_to_install)
if not skip_save:
logging.info("Creating filter_file.txt")
with open(os.path.join(ARTIFACTS_FOLDER, 'filter_file.txt'), 'w') as filter_file:
filter_file.write(tests_string)
# content_packs_to_install.txt is not used in nightly build
logging.info("Creating content_packs_to_install.txt")
with open(os.path.join(ARTIFACTS_FOLDER, 'content_packs_to_install.txt'), 'w') as content_packs_to_install:
content_packs_to_install.write(packs_to_install_string)
if is_nightly:
logging.debug('Collected the following tests:\n{0}\n'.format(tests_string))
logging.debug('Collected the following packs to install:\n{0}\n'.format('\n'.join(packs_to_install)))
else:
if tests_string:
success_msg = 'Collected the following tests:\n{0}\n'.format(tests_string)
logging.success(success_msg)
else:
logging.error('Did not find tests to run')
if packs_to_install_string:
success_msg = 'Collected the following content packs to install:\n{0}\n'.format(packs_to_install_string)
logging.success(success_msg)
else:
logging.error('Did not find content packs to install')
|
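Side note on the branch detection above: both versions regex-parse the output of `git branch`. A more direct way to ask git for the current branch, shown only as an alternative and not as what this collector actually does, is `git rev-parse --abbrev-ref HEAD`:
import subprocess
def current_branch():
    # Returns the checked-out branch name, or 'HEAD' for a detached checkout.
    return subprocess.run(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
print(current_branch())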
57,849 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
            # If the pack depends on a new pack (one that is not yet in index.json),
            # note that it is missing dependencies; after all packs have been updated
            # in index.json, we go over this pack again to add what was missing.
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
            pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # if the pack already exists at the cloud storage path and in the index, don't show it as changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
    # go over all the packs that were marked as missing dependencies and update them with the new index.json
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
            # If the pack depends on a new pack (one that is not yet in index.json),
            # note that it is missing dependencies; after all packs have been updated
            # in index.json, we go over this pack again to add what was missing.
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
            pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # if the pack already exists at the cloud storage path and in the index, don't show it as changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
    # go over all the packs that were marked as missing dependencies and update them with the new index.json
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
32,015 |
def search_ip_command(ip, reliability, create_relationships):
indicator_type = 'IP'
ip_list = argToList(ip)
command_results = []
relationships = []
for ip_address in ip_list:
ip_type = 'ipv6_address' if is_ipv6_valid(ip_address) else 'ipv4_address'
raw_res = search_indicator(ip_type, ip_address)
indicator = raw_res.get('indicator')
if indicator:
raw_tags = raw_res.get('tags')
score = calculate_dbot_score(indicator, indicator_type)
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability
)
if create_relationships:
relationships = create_relationships_list(entity_a=ip_address, entity_a_type=indicator_type, tags=raw_tags,
reliability=reliability)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score,
malware_family=get_tags_for_tags_and_malware_family_fields(raw_tags, True),
tags=get_tags_for_tags_and_malware_family_fields(raw_tags),
relationships=relationships
)
autofocus_ip_output = parse_indicator_response(indicator, raw_tags, indicator_type)
# create human readable markdown for ip
tags = autofocus_ip_output.get('Tags')
table_name = f'{VENDOR_NAME} {indicator_type} reputation for: {ip_address}'
if tags:
indicators_data = autofocus_ip_output.copy()
del indicators_data['Tags']
md = tableToMarkdown(table_name, indicators_data)
md += tableToMarkdown('Indicator Tags:', tags)
else:
md = tableToMarkdown(table_name, autofocus_ip_output)
else:
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=0,
reliability=reliability
)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score
)
md = f'### The IP indicator: {ip_address} was not found in AutoFocus'
autofocus_ip_output = {'IndicatorValue': ip_address}
command_results.append(CommandResults(
outputs_prefix='AutoFocus.IP',
outputs_key_field='IndicatorValue',
outputs=autofocus_ip_output,
readable_output=md,
raw_response=raw_res,
indicator=ip,
relationships=relationships
))
return command_results
|
def search_ip_command(ip, reliability, create_relationships):
indicator_type = 'IP'
ip_list = argToList(ip)
command_results = []
relationships = []
for ip_address in ip_list:
ip_type = 'ipv6_address' if is_ipv6_valid(ip_address) else 'ipv4_address'
raw_res = search_indicator(ip_type, ip_address)
indicator = raw_res.get('indicator')
if indicator:
raw_tags = raw_res.get('tags')
score = calculate_dbot_score(indicator, indicator_type)
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability
)
if create_relationships:
relationships = create_relationships_list(entity_a=ip_address, entity_a_type=indicator_type, tags=raw_tags,
reliability=reliability)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score,
malware_family=get_tags_for_tags_and_malware_family_fields(raw_tags, True),
tags=get_tags_for_tags_and_malware_family_fields(raw_tags),
relationships=relationships
)
autofocus_ip_output = parse_indicator_response(indicator, raw_tags, indicator_type)
# create human readable markdown for ip
tags = autofocus_ip_output.get('Tags')
table_name = f'{VENDOR_NAME} {indicator_type} reputation for: {ip_address}'
if tags:
indicators_data = autofocus_ip_output.copy()
del indicators_data['Tags']
md = tableToMarkdown(table_name, indicators_data)
md += tableToMarkdown('Indicator Tags:', tags)
else:
md = tableToMarkdown(table_name, autofocus_ip_output)
else:
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=0,
reliability=reliability
)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score,
)
md = f'### The IP indicator: {ip_address} was not found in AutoFocus'
autofocus_ip_output = {'IndicatorValue': ip_address}
command_results.append(CommandResults(
outputs_prefix='AutoFocus.IP',
outputs_key_field='IndicatorValue',
outputs=autofocus_ip_output,
readable_output=md,
raw_response=raw_res,
indicator=ip,
relationships=relationships
))
return command_results
|
19,049 |
def deactivate():
"""
Deactviate an environment and collect corresponding environment modifications
Returns:
spack.util.environment.EnvironmentModifications: Environment variables
            modifications to deactivate the environment.
"""
env_mods = EnvironmentModifications()
active = ev.active_environment()
if active is None:
return env_mods
if ev.default_view_name in active.views:
try:
with spack.store.db.read_transaction():
active.rm_default_view_from_env(env_mods)
except (spack.repo.UnknownPackageError,
spack.repo.UnknownNamespaceError) as e:
tty.warn(e)
tty.warn('Could not fully deactivate view due to missing package '
'or repo, shell environment may be corrupt.')
ev.deactivate()
return env_mods
|
def deactivate():
"""
Deactivate an environment and collect corresponding environment modifications
Returns:
spack.util.environment.EnvironmentModifications: Environment variables
            modifications to deactivate the environment.
"""
env_mods = EnvironmentModifications()
active = ev.active_environment()
if active is None:
return env_mods
if ev.default_view_name in active.views:
try:
with spack.store.db.read_transaction():
active.rm_default_view_from_env(env_mods)
except (spack.repo.UnknownPackageError,
spack.repo.UnknownNamespaceError) as e:
tty.warn(e)
tty.warn('Could not fully deactivate view due to missing package '
'or repo, shell environment may be corrupt.')
ev.deactivate()
return env_mods
|
40,466 |
def test_message_passing_int32_edge_index():
# Check that we can dispatch an int32 edge_index up to aggregation
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]], dtype=torch.int32)
edge_weight = torch.randn(edge_index.shape[1])
# Use a hook to promote the edge_index to long to workaround PyTorch CPU
# backend restriction to int64 for the index.
def cast_index_hook(module, inputs):
input_dict = inputs[-1]
input_dict['index'] = input_dict['index'].long()
return (input_dict, )
conv = MyConv(8, 32)
conv.register_aggregate_forward_pre_hook(cast_index_hook)
out = conv(x, edge_index, edge_weight)
assert out.size() == (4, 32)
|
def test_message_passing_int32_edge_index():
# Check that we can dispatch an int32 edge_index up to aggregation
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]], dtype=torch.int32)
edge_weight = torch.randn(edge_index.shape[1])
# Use a hook to promote the edge_index to long to workaround PyTorch CPU
# backend restriction to int64 for the index.
def cast_index_hook(module, inputs):
input_dict = inputs[-1]
input_dict['index'] = input_dict['index'].long()
return (input_dict, )
conv = MyConv(8, 32)
conv.register_aggregate_forward_pre_hook(cast_index_hook)
assert conv(x, edge_index, edge_weight).size() == (4, 32)
|
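The test above uses a PyTorch Geometric-specific aggregate forward pre-hook to promote an int32 index to int64. The same cast-in-a-pre-hook idea can be sketched with a plain torch.nn.Module and the standard register_forward_pre_hook (the Gather module below is illustrative and assumes PyTorch is installed):
import torch
from torch import nn
class Gather(nn.Module):
    def forward(self, src, index):
        # index_select expects an integer index tensor; the hook normalises it to int64.
        return src.index_select(0, index)
def cast_index_hook(module, args):
    src, index = args
    return src, index.long()   # returning a tuple replaces the forward() inputs
module = Gather()
module.register_forward_pre_hook(cast_index_hook)
out = module(torch.randn(4, 8), torch.tensor([0, 2], dtype=torch.int32))
print(out.shape)  # torch.Size([2, 8])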
1,206 |
def test_recoder_1():
# simplest case, no aliases
codes = ((1,), (2,))
rc = Recoder(codes)
assert rc.code[1] == 1
assert rc.code[2] == 2
with pytest.raises(KeyError):
rc.code.__getitem__(3)
|
def test_recoder_1():
# simplest case, no aliases
codes = ((1,), (2,))
rc = Recoder(codes)
assert rc.code[1] == 1
assert rc.code[2] == 2
with pytest.raises(KeyError):
rc.code[3]
|
7,465 |
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
compatibility_mode=False):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py:File` or :class:`h5py:Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to __astropy__
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
path = '__astropy__'
warnings.warn("table path was not set via the path= argument; "
"using default path __astropy__")
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
compatibility_mode=compatibility_mode)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
else:
raise OSError("Table {0} already exists".format(path))
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression)
else:
dset = output_group.create_dataset(name, data=table.as_array())
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = [h.encode('utf-8') for h in header_yaml]
if compatibility_mode:
warnings.warn("compatibility mode for writing is deprecated",
AstropyDeprecationWarning)
try:
dset.attrs[META_KEY] = header_encoded
except Exception as e:
warnings.warn(
"Attributes could not be written to the output HDF5 "
"file: {0}".format(e))
else:
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{0}` of type {1} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
|
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
compatibility_mode=False):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py:File` or :class:`h5py:Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
path = '__astropy__'
warnings.warn("table path was not set via the path= argument; "
"using default path __astropy__")
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
compatibility_mode=compatibility_mode)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
else:
raise OSError("Table {0} already exists".format(path))
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression)
else:
dset = output_group.create_dataset(name, data=table.as_array())
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = [h.encode('utf-8') for h in header_yaml]
if compatibility_mode:
warnings.warn("compatibility mode for writing is deprecated",
AstropyDeprecationWarning)
try:
dset.attrs[META_KEY] = header_encoded
except Exception as e:
warnings.warn(
"Attributes could not be written to the output HDF5 "
"file: {0}".format(e))
else:
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{0}` of type {1} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
|
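For context, callers usually reach this writer through the unified Table I/O interface rather than calling write_table_hdf5 directly. A typical round trip, assuming a recent astropy with h5py installed (file and path names below are arbitrary), looks like:
from astropy.table import Table
t = Table({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0]})
t["b"].unit = "m"
# serialize_meta=True keeps column info (unit, format, description, meta)
# instead of dropping it with a warning, as documented above.
t.write("example.h5", path="data/table", serialize_meta=True, overwrite=True)
t2 = Table.read("example.h5", path="data/table")
print(t2["b"].unit)  # m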
52,240 |
def get_public_ips(protocol=4):
"""Retrieve a list (sorted by frequency) of different public IP addresses from the IPmirrors"""
ip_url_yunohost_tab = settings_get("security.ipmirrors.v"+str(protocol)).split(",")
ip_count = {} # Count the number of times an IP has appeared
# Check URLS
for url in ip_url_yunohost_tab:
logger.debug("Fetching IP from %s " % url)
try:
ip = download_text(url, timeout=30).strip()
if ip in ip_count.keys():
ip_count[ip]+=1
else:
ip_count[ip]=1
except Exception as e:
logger.debug(
"Could not get public IPv%s from %s : %s" % (str(protocol), url, str(e))
)
ip_list_with_count = [ (ip,ip_count[ip]) for ip in ip_count ]
ip_list_with_count.sort(key=lambda x: x[1]) # Sort by frequency
return [ x[0] for x in ip_list_with_count ]
|
def get_public_ips(protocol=4):
"""Retrieve a list (sorted by frequency) of public IP addresses from the IPmirrors.
We request the IP on several IPmirrors to avoid resilience issues and some attacks.
In a classic way, those IPs are the same on the same protocol. However, in some cases
those public IPs could be different (attacks, several IPs on the server).
Note: this function doesn't guarantee to return all public IPs in use by the server.
"""
ip_url_yunohost_tab = settings_get("security.ipmirrors.v"+str(protocol)).split(",")
ip_count = {} # Count the number of times an IP has appeared
# Check URLS
for url in ip_url_yunohost_tab:
logger.debug("Fetching IP from %s " % url)
try:
ip = download_text(url, timeout=30).strip()
if ip in ip_count.keys():
ip_count[ip]+=1
else:
ip_count[ip]=1
except Exception as e:
logger.debug(
"Could not get public IPv%s from %s : %s" % (str(protocol), url, str(e))
)
ip_list_with_count = [ (ip,ip_count[ip]) for ip in ip_count ]
ip_list_with_count.sort(key=lambda x: x[1]) # Sort by frequency
return [ x[0] for x in ip_list_with_count ]
|
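The tally-then-sort logic above can also be written with collections.Counter; the snippet below only illustrates that counting idiom (note the function sorts ascending by frequency, which the second line mirrors):
from collections import Counter
ips = ["203.0.113.7", "203.0.113.7", "198.51.100.4", "203.0.113.7"]
counts = Counter(ips)
print(counts.most_common())            # [('203.0.113.7', 3), ('198.51.100.4', 1)]
print(sorted(counts, key=counts.get))  # ['198.51.100.4', '203.0.113.7'] (ascending)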
37,547 |
def level_3_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager:
"""Level 3 pass manager: heavy optimization by noise adaptive qubit mapping and
gate cancellation using commutativity rules and unitary synthesis.
This pass manager applies the user-given initial layout. If none is given, a search
for a perfect layout (i.e. one that satisfies all 2-qubit interactions) is conducted.
If no such layout is found, and device calibration information is available, the
circuit is mapped to the qubits with best readouts and to CX gates with highest fidelity.
The pass manager then transforms the circuit to match the coupling constraints.
It is then unrolled to the basis, and any flipped cx directions are fixed.
Finally, optimizations in the form of commutative gate cancellation, resynthesis
of two-qubit unitary blocks, and redundant reset removal are performed.
Note:
In simulators where ``coupling_map=None``, only the unrolling and
optimization stages are done.
Args:
pass_manager_config: configuration of the pass manager.
Returns:
a level 3 pass manager.
Raises:
TranspilerError: if the passmanager config is invalid.
"""
basis_gates = pass_manager_config.basis_gates
inst_map = pass_manager_config.inst_map
coupling_map = pass_manager_config.coupling_map
initial_layout = pass_manager_config.initial_layout
layout_method = pass_manager_config.layout_method or "dense"
routing_method = pass_manager_config.routing_method or "stochastic"
translation_method = pass_manager_config.translation_method or "translator"
scheduling_method = pass_manager_config.scheduling_method
instruction_durations = pass_manager_config.instruction_durations
seed_transpiler = pass_manager_config.seed_transpiler
backend_properties = pass_manager_config.backend_properties
approximation_degree = pass_manager_config.approximation_degree
unitary_synthesis_method = pass_manager_config.unitary_synthesis_method
timing_constraints = pass_manager_config.timing_constraints or TimingConstraints()
# 1. Unroll to 1q or 2q gates
_unroll3q = [
# Use unitary synthesis for basis aware decomposition of UnitaryGates
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
min_qubits=3,
),
Unroll3qOrMore(),
]
# 2. Layout on good qubits if calibration info available, otherwise on dense links
_given_layout = SetLayout(initial_layout)
def _choose_layout_condition(property_set):
# layout hasn't been set yet
return not property_set["layout"]
def _csp_not_found_match(property_set):
# If a layout hasn't been set by the time we run csp we need to run layout
if property_set["layout"] is None:
return True
# if CSP layout stopped for any reason other than solution found we need
# to run layout since CSP didn't converge.
if (
property_set["CSPLayout_stop_reason"] is not None
and property_set["CSPLayout_stop_reason"] != "solution found"
):
return True
return False
# 2a. If layout method is not set, first try a trivial layout
_choose_layout_0 = (
[]
if pass_manager_config.layout_method
else [
TrivialLayout(coupling_map),
Layout2qDistance(coupling_map, property_name="trivial_layout_score"),
]
)
# 2b. If trivial layout wasn't perfect (ie no swaps are needed) then try
# using CSP layout to find a perfect layout
_choose_layout_1 = (
[]
if pass_manager_config.layout_method
else CSPLayout(coupling_map, call_limit=10000, time_limit=60, seed=seed_transpiler)
)
def _trivial_not_perfect(property_set):
# Verify that a trivial layout is perfect. If trivial_layout_score > 0
# the layout is not perfect. The layout property set is unconditionally
# set by trivial layout so we clear that before running CSP
if property_set["trivial_layout_score"] is not None:
if property_set["trivial_layout_score"] != 0:
property_set["layout"]._wrapped = None
return True
return False
# 2c. if CSP didn't converge on a solution use layout_method (dense).
if layout_method == "trivial":
_choose_layout_2 = TrivialLayout(coupling_map)
elif layout_method == "dense":
_choose_layout_2 = DenseLayout(coupling_map, backend_properties)
elif layout_method == "noise_adaptive":
_choose_layout_2 = NoiseAdaptiveLayout(backend_properties)
elif layout_method == "sabre":
_choose_layout_2 = SabreLayout(coupling_map, max_iterations=4, seed=seed_transpiler)
else:
raise TranspilerError("Invalid layout method %s." % layout_method)
# 3. Extend dag/layout with ancillas using the full coupling map
_embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()]
# 4. Swap to fit the coupling map
_swap_check = CheckMap(coupling_map)
def _swap_condition(property_set):
return not property_set["is_swap_mapped"]
_swap = [BarrierBeforeFinalMeasurements()]
if routing_method == "basic":
_swap += [BasicSwap(coupling_map)]
elif routing_method == "stochastic":
_swap += [StochasticSwap(coupling_map, trials=200, seed=seed_transpiler)]
elif routing_method == "lookahead":
_swap += [LookaheadSwap(coupling_map, search_depth=5, search_width=6)]
elif routing_method == "sabre":
_swap += [SabreSwap(coupling_map, heuristic="decay", seed=seed_transpiler)]
elif routing_method == "none":
_swap += [
Error(
msg=(
"No routing method selected, but circuit is not routed to device. "
"CheckMap Error: {check_map_msg}"
),
action="raise",
)
]
else:
raise TranspilerError("Invalid routing method %s." % routing_method)
# 5. Unroll to the basis
if translation_method == "unroller":
_unroll = [Unroller(basis_gates)]
elif translation_method == "translator":
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
_unroll = [
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
UnrollCustomDefinitions(sel, basis_gates),
BasisTranslator(sel, basis_gates),
]
elif translation_method == "synthesis":
_unroll = [
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
min_qubits=3,
),
Unroll3qOrMore(),
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
]
else:
raise TranspilerError("Invalid translation method %s." % translation_method)
# 6. Fix any CX direction mismatch
_direction_check = [CheckGateDirection(coupling_map)]
def _direction_condition(property_set):
return not property_set["is_direction_mapped"]
_direction = [GateDirection(coupling_map)]
# 8. Optimize iteratively until no more change in depth. Removes useless gates
# after reset and before measure, commutes gates and optimizes contiguous blocks.
_depth_check = [Depth(), FixedPoint("depth")]
def _opt_control(property_set):
return not property_set["depth_fixed_point"]
_reset = [RemoveResetInZeroState()]
_meas = [OptimizeSwapBeforeMeasure(), RemoveDiagonalGatesBeforeMeasure()]
_opt = [
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
Optimize1qGatesDecomposition(basis_gates),
CommutativeCancellation(),
]
# 9. Unify all durations (either SI, or convert to dt if known)
# Schedule the circuit only when scheduling_method is supplied
_time_unit_setup = [ContainsInstruction("delay")]
_time_unit_conversion = [TimeUnitConversion(instruction_durations)]
def _contains_delay(property_set):
return property_set["contains_delay"]
_scheduling = []
if scheduling_method:
_scheduling += _time_unit_conversion
if scheduling_method in {"alap", "as_late_as_possible"}:
_scheduling += [ALAPSchedule(instruction_durations)]
elif scheduling_method in {"asap", "as_soon_as_possible"}:
_scheduling += [ASAPSchedule(instruction_durations)]
else:
raise TranspilerError("Invalid scheduling method %s." % scheduling_method)
# 10. Call measure alignment. Should come after scheduling.
if (
timing_constraints.granularity != 1
or timing_constraints.min_length != 1
or timing_constraints.acquire_alignment != 1
):
_alignments = [
ValidatePulseGates(
granularity=timing_constraints.granularity, min_length=timing_constraints.min_length
),
AlignMeasures(alignment=timing_constraints.acquire_alignment),
]
else:
_alignments = []
# Build pass manager
pm3 = PassManager()
pm3.append(_unroll3q)
pm3.append(_reset + _meas)
if coupling_map or initial_layout:
pm3.append(_given_layout)
pm3.append(_choose_layout_0, condition=_choose_layout_condition)
pm3.append(_choose_layout_1, condition=_trivial_not_perfect)
pm3.append(_choose_layout_2, condition=_csp_not_found_match)
pm3.append(_embed)
pm3.append(_swap_check)
pm3.append(_swap, condition=_swap_condition)
pm3.append(_unroll)
if coupling_map and not coupling_map.is_symmetric:
pm3.append(_direction_check)
pm3.append(_direction, condition=_direction_condition)
pm3.append(_reset)
pm3.append(_depth_check + _opt + _unroll, do_while=_opt_control)
if inst_map and inst_map.has_custom_gate():
pm3.append(PulseGates(inst_map=inst_map))
if scheduling_method:
pm3.append(_scheduling)
elif instruction_durations:
pm3.append(_time_unit_setup)
pm3.append(_time_unit_conversion, condition=_contains_delay)
pm3.append(_alignments)
return pm3
|
def level_3_pass_manager(pass_manager_config: PassManagerConfig) -> PassManager:
"""Level 3 pass manager: heavy optimization by noise adaptive qubit mapping and
gate cancellation using commutativity rules and unitary synthesis.
This pass manager applies the user-given initial layout. If none is given, a search
for a perfect layout (i.e. one that satisfies all 2-qubit interactions) is conducted.
If no such layout is found, and device calibration information is available, the
circuit is mapped to the qubits with best readouts and to CX gates with highest fidelity.
The pass manager then transforms the circuit to match the coupling constraints.
It is then unrolled to the basis, and any flipped cx directions are fixed.
Finally, optimizations in the form of commutative gate cancellation, resynthesis
of two-qubit unitary blocks, and redundant reset removal are performed.
Note:
In simulators where ``coupling_map=None``, only the unrolling and
optimization stages are done.
Args:
pass_manager_config: configuration of the pass manager.
Returns:
a level 3 pass manager.
Raises:
TranspilerError: if the passmanager config is invalid.
"""
basis_gates = pass_manager_config.basis_gates
inst_map = pass_manager_config.inst_map
coupling_map = pass_manager_config.coupling_map
initial_layout = pass_manager_config.initial_layout
layout_method = pass_manager_config.layout_method or "dense"
routing_method = pass_manager_config.routing_method or "stochastic"
translation_method = pass_manager_config.translation_method or "translator"
scheduling_method = pass_manager_config.scheduling_method
instruction_durations = pass_manager_config.instruction_durations
seed_transpiler = pass_manager_config.seed_transpiler
backend_properties = pass_manager_config.backend_properties
approximation_degree = pass_manager_config.approximation_degree
unitary_synthesis_method = pass_manager_config.unitary_synthesis_method
timing_constraints = pass_manager_config.timing_constraints or TimingConstraints()
# 1. Unroll to 1q or 2q gates
_unroll3q = [
# Use unitary synthesis for basis aware decomposition of UnitaryGates
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
min_qubits=3,
),
Unroll3qOrMore(),
]
# 2. Layout on good qubits if calibration info available, otherwise on dense links
_given_layout = SetLayout(initial_layout)
def _choose_layout_condition(property_set):
# layout hasn't been set yet
return not property_set["layout"]
def _csp_not_found_match(property_set):
# If a layout hasn't been set by the time we run csp we need to run layout
if property_set["layout"] is None:
return True
# if CSP layout stopped for any reason other than solution found we need
# to run layout since CSP didn't converge.
if (
property_set["CSPLayout_stop_reason"] is not None
and property_set["CSPLayout_stop_reason"] != "solution found"
):
return True
return False
# 2a. If layout method is not set, first try a trivial layout
_choose_layout_0 = (
[]
if pass_manager_config.layout_method
else [
TrivialLayout(coupling_map),
Layout2qDistance(coupling_map, property_name="trivial_layout_score"),
]
)
    # 2b. If the trivial layout wasn't perfect (i.e. swaps would be needed) then try
    # using CSP layout to find a perfect layout
_choose_layout_1 = (
[]
if pass_manager_config.layout_method
else CSPLayout(coupling_map, call_limit=10000, time_limit=60, seed=seed_transpiler)
)
def _trivial_not_perfect(property_set):
# Verify that a trivial layout is perfect. If trivial_layout_score > 0
# the layout is not perfect. The layout property set is unconditionally
# set by trivial layout so we clear that before running CSP
if property_set["trivial_layout_score"] is not None:
if property_set["trivial_layout_score"] != 0:
property_set["layout"]._wrapped = None
return True
return False
# 2c. if CSP didn't converge on a solution use layout_method (dense).
if layout_method == "trivial":
_choose_layout_2 = TrivialLayout(coupling_map)
elif layout_method == "dense":
_choose_layout_2 = DenseLayout(coupling_map, backend_properties)
elif layout_method == "noise_adaptive":
_choose_layout_2 = NoiseAdaptiveLayout(backend_properties)
elif layout_method == "sabre":
_choose_layout_2 = SabreLayout(coupling_map, max_iterations=4, seed=seed_transpiler)
else:
raise TranspilerError("Invalid layout method %s." % layout_method)
# 3. Extend dag/layout with ancillas using the full coupling map
_embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla(), ApplyLayout()]
# 4. Swap to fit the coupling map
_swap_check = CheckMap(coupling_map)
def _swap_condition(property_set):
return not property_set["is_swap_mapped"]
_swap = [BarrierBeforeFinalMeasurements()]
if routing_method == "basic":
_swap += [BasicSwap(coupling_map)]
elif routing_method == "stochastic":
_swap += [StochasticSwap(coupling_map, trials=200, seed=seed_transpiler)]
elif routing_method == "lookahead":
_swap += [LookaheadSwap(coupling_map, search_depth=5, search_width=6)]
elif routing_method == "sabre":
_swap += [SabreSwap(coupling_map, heuristic="decay", seed=seed_transpiler)]
elif routing_method == "none":
_swap += [
Error(
msg=(
"No routing method selected, but circuit is not routed to device. "
f"CheckMap Error: {check_map_msg}"
),
action="raise",
)
]
else:
raise TranspilerError("Invalid routing method %s." % routing_method)
# 5. Unroll to the basis
if translation_method == "unroller":
_unroll = [Unroller(basis_gates)]
elif translation_method == "translator":
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
_unroll = [
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
UnrollCustomDefinitions(sel, basis_gates),
BasisTranslator(sel, basis_gates),
]
elif translation_method == "synthesis":
_unroll = [
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
min_qubits=3,
),
Unroll3qOrMore(),
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
]
else:
raise TranspilerError("Invalid translation method %s." % translation_method)
# 6. Fix any CX direction mismatch
_direction_check = [CheckGateDirection(coupling_map)]
def _direction_condition(property_set):
return not property_set["is_direction_mapped"]
_direction = [GateDirection(coupling_map)]
# 8. Optimize iteratively until no more change in depth. Removes useless gates
# after reset and before measure, commutes gates and optimizes contiguous blocks.
_depth_check = [Depth(), FixedPoint("depth")]
def _opt_control(property_set):
return not property_set["depth_fixed_point"]
_reset = [RemoveResetInZeroState()]
_meas = [OptimizeSwapBeforeMeasure(), RemoveDiagonalGatesBeforeMeasure()]
_opt = [
Collect2qBlocks(),
ConsolidateBlocks(basis_gates=basis_gates),
UnitarySynthesis(
basis_gates,
approximation_degree=approximation_degree,
coupling_map=coupling_map,
backend_props=backend_properties,
method=unitary_synthesis_method,
),
Optimize1qGatesDecomposition(basis_gates),
CommutativeCancellation(),
]
# 9. Unify all durations (either SI, or convert to dt if known)
# Schedule the circuit only when scheduling_method is supplied
_time_unit_setup = [ContainsInstruction("delay")]
_time_unit_conversion = [TimeUnitConversion(instruction_durations)]
def _contains_delay(property_set):
return property_set["contains_delay"]
_scheduling = []
if scheduling_method:
_scheduling += _time_unit_conversion
if scheduling_method in {"alap", "as_late_as_possible"}:
_scheduling += [ALAPSchedule(instruction_durations)]
elif scheduling_method in {"asap", "as_soon_as_possible"}:
_scheduling += [ASAPSchedule(instruction_durations)]
else:
raise TranspilerError("Invalid scheduling method %s." % scheduling_method)
# 10. Call measure alignment. Should come after scheduling.
if (
timing_constraints.granularity != 1
or timing_constraints.min_length != 1
or timing_constraints.acquire_alignment != 1
):
_alignments = [
ValidatePulseGates(
granularity=timing_constraints.granularity, min_length=timing_constraints.min_length
),
AlignMeasures(alignment=timing_constraints.acquire_alignment),
]
else:
_alignments = []
# Build pass manager
pm3 = PassManager()
pm3.append(_unroll3q)
pm3.append(_reset + _meas)
if coupling_map or initial_layout:
pm3.append(_given_layout)
pm3.append(_choose_layout_0, condition=_choose_layout_condition)
pm3.append(_choose_layout_1, condition=_trivial_not_perfect)
pm3.append(_choose_layout_2, condition=_csp_not_found_match)
pm3.append(_embed)
pm3.append(_swap_check)
pm3.append(_swap, condition=_swap_condition)
pm3.append(_unroll)
if coupling_map and not coupling_map.is_symmetric:
pm3.append(_direction_check)
pm3.append(_direction, condition=_direction_condition)
pm3.append(_reset)
pm3.append(_depth_check + _opt + _unroll, do_while=_opt_control)
if inst_map and inst_map.has_custom_gate():
pm3.append(PulseGates(inst_map=inst_map))
if scheduling_method:
pm3.append(_scheduling)
elif instruction_durations:
pm3.append(_time_unit_setup)
pm3.append(_time_unit_conversion, condition=_contains_delay)
pm3.append(_alignments)
return pm3
|
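For context on how this level-3 pipeline is normally reached, a minimal sketch using the public ``transpile`` entry point; the basis gates and coupling map below are made-up example constraints, not tied to any particular backend.
# Minimal sketch: exercising the level-3 preset pipeline through transpile().
# The basis gates and coupling map are invented example constraints.
from qiskit import QuantumCircuit, transpile

qc = QuantumCircuit(3)
qc.h(0)
qc.cx(0, 1)
qc.cx(1, 2)
qc.measure_all()

compiled = transpile(
    qc,
    basis_gates=["cx", "id", "rz", "sx", "x"],  # assumed basis set
    coupling_map=[[0, 1], [1, 2]],              # assumed linear coupling
    optimization_level=3,                       # selects the level-3 pass manager
    seed_transpiler=42,
)
print(compiled.depth())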
40,146 |
def _checkout_github_project(github_path: str, folder_name: str):
clone_url = 'https://www.github.com/{}'.format(github_path)
git_p = subprocess.run('git clone {}'.format(clone_url), shell=True, stdout=DEVNULL, stderr=DEVNULL, text=True)
if git_p != 0:
raise InstallationError('Cloning from github failed for project {}\n {}'.format(github_path, clone_url))
if not Path('.', folder_name).exists():
raise InstallationError('Repository creation failed on folder {}\n {}'.format(folder_name, clone_url))
|
def _checkout_github_project(github_path: str, folder_name: str):
clone_url = 'https://www.github.com/{}'.format(github_path)
git_p = subprocess.run('git clone {}'.format(clone_url), shell=True, stdout=DEVNULL, stderr=DEVNULL, text=True)
if git_p.returncode != 0:
raise InstallationError('Cloning from github failed for project {}\n {}'.format(github_path, clone_url))
if not Path('.', folder_name).exists():
raise InstallationError('Repository creation failed on folder {}\n {}'.format(folder_name, clone_url))
|
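The fix above boils down to reading ``returncode`` from the ``CompletedProcess`` object instead of comparing the object itself to 0. A small standalone sketch of the same pattern, using only the standard library and a generic ``RuntimeError`` in place of the module's ``InstallationError``:
import subprocess
from pathlib import Path

def clone_github_project(github_path: str, folder_name: str) -> None:
    clone_url = "https://github.com/{}".format(github_path)
    proc = subprocess.run(
        ["git", "clone", clone_url],      # argv list, so no shell=True needed
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if proc.returncode != 0:              # proc is a CompletedProcess, not an int
        raise RuntimeError("Cloning failed for {}".format(clone_url))
    if not Path(".", folder_name).exists():
        raise RuntimeError("Expected folder {} was not created".format(folder_name))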
4,480 |
def beer_lambert_law(raw, ppf=6.):
r"""Convert NIRS optical density data to haemoglobin concentration.
Parameters
----------
raw : instance of Raw
The optical density data.
ppf : float
The partial pathlength factor.
Returns
-------
raw : instance of Raw
The modified raw instance.
"""
from scipy import linalg
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
_validate_type(ppf, 'numeric', 'ppf')
ppf = float(ppf)
picks = _validate_nirs_info(raw.info, fnirs='od', which='Beer-lambert')
# This is the one place we *really* need the actual/accurate frequencies
freqs = np.array(
[raw.info['chs'][pick]['loc'][9] for pick in picks], float)
abs_coef = _load_absorption(freqs)
distances = source_detector_distances(raw.info)
if (distances == 0).any():
warn('Source-detector distances are zero, some resulting '
'concentrations will be zero. Consider setting a montage '
'with raw.set_montage.')
if (distances > 0.1).any():
warn('Source-detector distances are greater than 10 cm. '
'Large distances will result in invalid data, and are '
'likely due to optode locations being stored in a '
             'unit other than meters.')
rename = dict()
for ii, jj in zip(picks[::2], picks[1::2]):
EL = abs_coef * distances[ii] * ppf
iEL = linalg.pinv(EL)
raw._data[[ii, jj]] = iEL @ raw._data[[ii, jj]] * 1e-3
# Update channel information
coil_dict = dict(hbo=FIFF.FIFFV_COIL_FNIRS_HBO,
hbr=FIFF.FIFFV_COIL_FNIRS_HBR)
for ki, kind in zip((ii, jj), ('hbo', 'hbr')):
ch = raw.info['chs'][ki]
ch.update(coil_type=coil_dict[kind], unit=FIFF.FIFF_UNIT_MOL)
new_name = f'{ch["ch_name"].split(" ")[0]} {kind}'
rename[ch['ch_name']] = new_name
raw.rename_channels(rename)
# Validate the format of data after transformation is valid
_validate_nirs_info(raw.info, fnirs='hbx')
return raw
|
def beer_lambert_law(raw, ppf=6.):
r"""Convert NIRS optical density data to haemoglobin concentration.
Parameters
----------
raw : instance of Raw
The optical density data.
ppf : float
The partial pathlength factor.
Returns
-------
raw : instance of Raw
The modified raw instance.
"""
from scipy import linalg
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
_validate_type(ppf, 'numeric', 'ppf')
ppf = float(ppf)
picks = _validate_nirs_info(raw.info, fnirs='od', which='Beer-lambert')
# This is the one place we *really* need the actual/accurate frequencies
freqs = np.array(
[raw.info['chs'][pick]['loc'][9] for pick in picks], float)
abs_coef = _load_absorption(freqs)
distances = source_detector_distances(raw.info)
if (distances == 0).any():
warn('Source-detector distances are zero, some resulting '
'concentrations will be zero. Consider setting a montage '
'with raw.set_montage.')
if (distances > 0.1).any():
warn('Source-detector distances are greater than 10 cm. '
'Large distances will result in invalid data, and are '
'likely due to optode locations being stored in a '
             'unit other than meters.')
rename = dict()
for ii, jj in zip(picks[::2], picks[1::2]):
EL = abs_coef * distances[ii] * ppf
iEL = linalg.pinv(EL)
raw._data[[ii, jj]] = iEL @ raw._data[[ii, jj]] * 1e-3
# Update channel information
coil_dict = dict(hbo=FIFF.FIFFV_COIL_FNIRS_HBO,
hbr=FIFF.FIFFV_COIL_FNIRS_HBR)
for ki, kind in zip((ii, jj), ('hbo', 'hbr')):
ch = raw.info['chs'][ki]
ch.update(coil_type=coil_dict[kind], unit=FIFF.FIFF_UNIT_MOL)
new_name = f'{ch["ch_name"].split(" ")[0]} {kind}'
rename[ch['ch_name']] = new_name
raw.rename_channels(rename)
# Validate the format of data after transformation is valid
_validate_nirs_info(raw.info, fnirs='hb')
return raw
|
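The numerical core of the function above is a small linear inversion per channel pair: build the effective pathlength matrix from the extinction coefficients, the source-detector distance and the partial pathlength factor, then apply its pseudo-inverse to the optical densities. A NumPy-only sketch with placeholder coefficients (the real values come from ``_load_absorption``):
import numpy as np

# Placeholder extinction coefficients [HbO, HbR] for two wavelengths.
abs_coef = np.array([[1486.6, 3843.7],
                     [2526.4, 1798.6]])
distance = 0.03   # source-detector distance in meters
ppf = 6.0         # partial pathlength factor

EL = abs_coef * distance * ppf
iEL = np.linalg.pinv(EL)

od = np.array([[0.12], [0.10]])   # optical density, one row per wavelength
conc = iEL @ od * 1e-3            # HbO and HbR concentration estimates
print(conc.ravel())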
30,624 |
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False,
server_list=None):
previous = previous or []
server_list = server_list or []
# Sometimes IANA simply won't give us the right root WHOIS server
exceptions = {
".ac.uk": "whois.ja.net",
".ps": "whois.pnina.ps",
".buzz": "whois.nic.buzz",
".moe": "whois.nic.moe",
# The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct
# registration.
"example.com": "whois.verisign-grs.com"
}
if rfc3490:
if sys.version_info < (3, 0):
domain = encode(domain if type(domain) is unicode else decode(domain, "utf8"), "idna")
else:
domain = encode(domain, "idna").decode("ascii")
if len(previous) == 0 and server == "":
# Root query
is_exception = False
for exception, exc_serv in exceptions.items():
if domain.endswith(exception):
is_exception = True
target_server = exc_serv
break
if not is_exception:
target_server = get_root_server(domain)
else:
target_server = server
if target_server == "whois.jprs.jp":
request_domain = "%s/e" % domain # Suppress Japanese output
elif domain.endswith(".de") and (target_server == "whois.denic.de" or target_server == "de.whois-servers.net"):
request_domain = "-T dn,ace %s" % domain # regional specific stuff
elif target_server == "whois.verisign-grs.com":
request_domain = "=%s" % domain # Avoid partial matches
else:
request_domain = domain
    # The following loop handles errno 104 ("connection reset by peer") by retrying whois_request with the same arguments.
    # If the request fails for any other reason, it is not retried.
for i in range(0, 3):
try:
response = whois_request(request_domain, target_server)
except socket.error as err:
if err.errno == errno.ECONNRESET:
continue
else:
raise err
break
# Executed only if the for loop ran to the full
# (3 tries led to errno.ECONNRESET)
else:
raise WhoisException('(104) Connection Reset By Peer')
if never_cut:
# If the caller has requested to 'never cut' responses, he will get the original response from the server (
# this is useful for callers that are only interested in the raw data). Otherwise, if the target is
# verisign-grs, we will select the data relevant to the requested domain, and discard the rest, so that in a
# multiple-option response the parsing code will only touch the information relevant to the requested domain.
# The side-effect of this is that when `never_cut` is set to False, any verisign-grs responses in the raw data
# will be missing header, footer, and alternative domain options (this is handled a few lines below,
# after the verisign-grs processing).
new_list = [response] + previous
if target_server == "whois.verisign-grs.com":
# VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
# we need to actually find the correct record in the list.
for record in response.split("\n\n"):
if re.search("Domain Name: %s\n" % domain.upper(), record):
response = record
break
if never_cut == False:
new_list = [response] + previous
server_list.append(target_server)
for line in [x.strip() for x in response.splitlines()]:
match = re.match("(refer|whois server|referral url|registrar whois(?: server)?):\s*([^\s]+\.[^\s]+)", line,
re.IGNORECASE)
if match is not None:
referal_server = match.group(2)
if referal_server != server and "://" not in referal_server: # We want to ignore anything non-WHOIS (eg. HTTP) for now.
                # Referral to another WHOIS server...
return get_whois_raw(domain, referal_server, new_list, server_list=server_list,
with_server_list=with_server_list)
if with_server_list:
return new_list, server_list
else:
return new_list
|
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False,
server_list=None):
previous = previous or []
server_list = server_list or []
# Sometimes IANA simply won't give us the right root WHOIS server
exceptions = {
".ac.uk": "whois.ja.net",
".ps": "whois.pnina.ps",
".buzz": "whois.nic.buzz",
".moe": "whois.nic.moe",
# The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct
# registration.
"example.com": "whois.verisign-grs.com"
}
if rfc3490:
if sys.version_info < (3, 0):
domain = encode(domain if type(domain) is unicode else decode(domain, "utf8"), "idna")
else:
domain = encode(domain, "idna").decode("ascii")
if len(previous) == 0 and server == "":
# Root query
is_exception = False
for exception, exc_serv in exceptions.items():
if domain.endswith(exception):
is_exception = True
target_server = exc_serv
break
if not is_exception:
target_server = get_root_server(domain)
else:
target_server = server
if target_server == "whois.jprs.jp":
request_domain = "%s/e" % domain # Suppress Japanese output
elif domain.endswith(".de") and (target_server == "whois.denic.de" or target_server == "de.whois-servers.net"):
request_domain = "-T dn,ace %s" % domain # regional specific stuff
elif target_server == "whois.verisign-grs.com":
request_domain = "=%s" % domain # Avoid partial matches
else:
request_domain = domain
    # The following loop handles errno 104 ("connection reset by peer") by retrying whois_request with the same arguments.
    # If the request fails for any other reason, it is not retried.
for i in range(0, 3):
try:
response = whois_request(request_domain, target_server)
except socket.error as err:
if err.errno == errno.ECONNRESET:
continue
else:
raise
break
# Executed only if the for loop ran to the full
# (3 tries led to errno.ECONNRESET)
else:
raise WhoisException('(104) Connection Reset By Peer')
if never_cut:
# If the caller has requested to 'never cut' responses, he will get the original response from the server (
# this is useful for callers that are only interested in the raw data). Otherwise, if the target is
# verisign-grs, we will select the data relevant to the requested domain, and discard the rest, so that in a
# multiple-option response the parsing code will only touch the information relevant to the requested domain.
# The side-effect of this is that when `never_cut` is set to False, any verisign-grs responses in the raw data
# will be missing header, footer, and alternative domain options (this is handled a few lines below,
# after the verisign-grs processing).
new_list = [response] + previous
if target_server == "whois.verisign-grs.com":
# VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
# we need to actually find the correct record in the list.
for record in response.split("\n\n"):
if re.search("Domain Name: %s\n" % domain.upper(), record):
response = record
break
if never_cut == False:
new_list = [response] + previous
server_list.append(target_server)
for line in [x.strip() for x in response.splitlines()]:
match = re.match("(refer|whois server|referral url|registrar whois(?: server)?):\s*([^\s]+\.[^\s]+)", line,
re.IGNORECASE)
if match is not None:
referal_server = match.group(2)
if referal_server != server and "://" not in referal_server: # We want to ignore anything non-WHOIS (eg. HTTP) for now.
                # Referral to another WHOIS server...
return get_whois_raw(domain, referal_server, new_list, server_list=server_list,
with_server_list=with_server_list)
if with_server_list:
return new_list, server_list
else:
return new_list
|
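One reusable idea in the function above is the bounded retry on ``errno.ECONNRESET``. A standalone sketch of that idiom, restructured slightly (an early ``return`` instead of ``break`` plus ``for``/``else``):
import errno
import socket

def query_with_retry(do_request, attempts=3):
    """Call do_request(), retrying up to `attempts` times on connection resets."""
    for _ in range(attempts):
        try:
            return do_request()
        except socket.error as err:
            if err.errno == errno.ECONNRESET:
                continue        # peer reset the connection; try again
            raise               # any other socket error propagates unchanged
    raise ConnectionError('(104) Connection Reset By Peer on every attempt')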
43,885 |
def _convert_to_su4(U):
r"""Check unitarity of a 4x4 matrix and convert it to :math:`SU(4)` if possible.
Args:
U (array[complex]): A matrix, presumed to be :math:`4 \times 4` and unitary.
Returns:
array[complex]: A :math:`4 \times 4` matrix in :math:`SU(4)` that is
equivalent to U up to a global phase.
"""
# Check unitarity
if not math.allclose(math.dot(U, math.T(math.conj(U))), math.eye(4), atol=1e-7):
raise ValueError("Operator must be unitary.")
# Compute the determinant
det = math.linalg.det(U)
# Convert to SU(4) if it's not close to 1
if not math.allclose(det, 1.0):
exp_angle = -1j * math.cast_like(math.angle(det), 1j) / 4
U = math.cast_like(U, det) * qml.math.exp(exp_angle)
return U
|
def _convert_to_su4(U):
r"""Check unitarity of a 4x4 matrix and convert it to :math:`SU(4)` if the determinant is not 1.
Args:
U (array[complex]): A matrix, presumed to be :math:`4 \times 4` and unitary.
Returns:
array[complex]: A :math:`4 \times 4` matrix in :math:`SU(4)` that is
equivalent to U up to a global phase.
"""
# Check unitarity
if not math.allclose(math.dot(U, math.T(math.conj(U))), math.eye(4), atol=1e-7):
raise ValueError("Operator must be unitary.")
# Compute the determinant
det = math.linalg.det(U)
# Convert to SU(4) if it's not close to 1
if not math.allclose(det, 1.0):
exp_angle = -1j * math.cast_like(math.angle(det), 1j) / 4
U = math.cast_like(U, det) * qml.math.exp(exp_angle)
return U
|
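The conversion above removes a global phase so the determinant becomes 1. A NumPy-only sketch of the same idea on a randomly generated unitary (QR decomposition of a random complex matrix):
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
U, _ = np.linalg.qr(A)                          # a random 4x4 unitary

det = np.linalg.det(U)
U_su4 = U * np.exp(-1j * np.angle(det) / 4)     # strip the global phase

print(np.isclose(np.linalg.det(U_su4), 1.0))    # True up to numerical error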
13,203 |
def is_doi_locally_managed(doi_value):
"""Determine if a DOI value is locally managed."""
return any(doi_value.startswith(prefix+'/') for prefix in
current_app.config['ZENODO_LOCAL_DOI_PREFIXES'])
|
def is_doi_locally_managed(doi_value):
"""Determine if a DOI value is locally managed."""
return any(doi_value.startswith(prefix + '/') for prefix in
current_app.config['ZENODO_LOCAL_DOI_PREFIXES'])
|
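A tiny self-contained illustration of the prefix check, with made-up prefixes standing in for the ``ZENODO_LOCAL_DOI_PREFIXES`` config value:
local_prefixes = ['10.5281', '10.5072']   # assumed example prefixes
doi = '10.5281/zenodo.1234567'
print(any(doi.startswith(prefix + '/') for prefix in local_prefixes))   # True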
43,397 |
def interferometer_uniform(n_wires, low=0, high=2 * pi, seed=None):
r"""Creates a list of three parameter arrays for :func:`~.Interferometer`, drawn from a uniform distribution.
The shape of one of the arrays is ``(n_wires,)``. It contains the initialized rotation angles :math:`\varphi` for the n_wires Rotation gates. The shape of other two arrays is ``(n_wires*(n_wires-1)/2,)``. These contain the initialized transmittivity angles :math:`\theta` and phase angles :math:`\phi` for the Beam Splitter gates.
The parameters are initialized uniformly from the interval ``[low, high]``.
Args:
n_wires (int): number of modes that the interferometer acts on
Keyword Args:
low (float): minimum value of uniformly drawn rotation angles
high (float): maximum value of uniformly drawn rotation angles
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
list of parameter arrays
"""
if seed is not None:
np.random.seed(seed)
n_if = n_wires * (n_wires - 1) // 2
theta = np.random.uniform(low=low, high=high, size=(n_if,))
phi = np.random.uniform(low=low, high=high, size=(n_if,))
varphi = np.random.uniform(low=low, high=high, size=(n_wires,))
return [theta, phi, varphi]
|
def interferometer_uniform(n_wires, low=0, high=2 * pi, seed=None):
r"""Creates a list of three parameter arrays for :func:`~.Interferometer`, drawn from a uniform distribution.
The shape of one of the arrays is ``(n_wires,)``. It contains the initialized rotation angles :math:`\varphi` for the ``n_wires`` Rotation gates. The other two arrays have shapes ``(n_wires*(n_wires-1)/2,)``. These arrays contain the initialized transmittivity angles :math:`\theta` and phase angles :math:`\phi` for the Beam Splitter gates.
The parameters are initialized uniformly from the interval ``[low, high]``.
Args:
n_wires (int): number of modes that the interferometer acts on
Keyword Args:
low (float): minimum value of uniformly drawn rotation angles
high (float): maximum value of uniformly drawn rotation angles
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
list of parameter arrays
"""
if seed is not None:
np.random.seed(seed)
n_if = n_wires * (n_wires - 1) // 2
theta = np.random.uniform(low=low, high=high, size=(n_if,))
phi = np.random.uniform(low=low, high=high, size=(n_if,))
varphi = np.random.uniform(low=low, high=high, size=(n_wires,))
return [theta, phi, varphi]
|
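A quick shape check matching the docstring above, written with plain NumPy so it runs on its own:
import numpy as np

n_wires = 4
n_if = n_wires * (n_wires - 1) // 2      # 6 beam-splitter parameter pairs for 4 modes
theta = np.random.uniform(0, 2 * np.pi, size=(n_if,))
phi = np.random.uniform(0, 2 * np.pi, size=(n_if,))
varphi = np.random.uniform(0, 2 * np.pi, size=(n_wires,))
print(theta.shape, phi.shape, varphi.shape)   # (6,) (6,) (4,)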
50,236 |
def validate_merchant_account(
notification: Dict[str, Any], gateway_config: "GatewayConfig"
):
merchant_account_code = notification.get("merchantAccountCode")
if merchant_account_code != gateway_config.connection_params.get(
"merchant_account"
):
return False
return True
|
def validate_merchant_account(
notification: Dict[str, Any], gateway_config: "GatewayConfig"
):
merchant_account_code = notification.get("merchantAccountCode")
return merchant_account_code == gateway_config.connection_params.get(
"merchant_account"
)
|
50,072 |
def mcsolve(H, psi0, tlist, c_ops=None, e_ops=None, ntraj=1, *,
args=None, options=None, seeds=None, target_tol=None, timeout=0):
r"""
Monte Carlo evolution of a state vector :math:`|\psi \rangle` for a
given Hamiltonian and sets of collapse operators. Options for the
underlying ODE solver are given by the Options class.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable.
System Hamiltonian as a Qobj, QobjEvo, can also be a function or list
that can be made into a Qobjevo. (See :class:`qutip.QobjEvo`'s
documentation). ``H`` can be a superoperator (liouvillian) if some
collapse operators are to be treated deterministically.
psi0 : :class:`qutip.Qobj`
Initial state vector
tlist : array_like
Times at which results are recorded.
ntraj : int
        Maximum number of trajectories to run. Can be cut short if a time limit
        is passed in options (by default, mcsolve will stop after 1e8 sec)::
            ``options.mcsolve['map_options']['timeout'] = max_sec``
        The run can also stop early once the target tolerance is reached; see ``target_tol``.
c_ops : ``list``
A ``list`` of collapse operators. They must be operators even if ``H``
is a superoperator.
e_ops : ``list``, [optional]
A ``list`` of operator as Qobj, QobjEvo or callable with signature of
(t, state: Qobj) for calculating expectation values. When no ``e_ops``
are given, the solver will default to save the states.
args : dict, [optional]
Arguments for time-dependent Hamiltonian and collapse operator terms.
options : SolverOptions, [optional]
Options for the evolution.
seeds : int, SeedSequence, list, [optional]
        Seed for the random number generator. It can be a single seed used to
        spawn seeds for each trajectory, or a list of seeds, one for each
        trajectory. Seeds are saved in the result and can be reused with::
seeds=prev_result.seeds
target_tol : float, list, [optional]
Target tolerance of the evolution. The evolution will compute
trajectories until the error on the expectation values is lower than
this tolerance. The error is computed using jackknife resampling.
``target_tol`` can be an absolute tolerance, a pair of absolute and
relative tolerance, in that order. Lastly, it can be a list of pairs of
(atol, rtol) for each e_ops.
timeout : float [optional]
        Maximum time for the evolution in seconds. When reached, no more
        trajectories will be computed. Overwrites the option of the same name.
Returns
-------
results : :class:`qutip.solver.Result`
        Object storing all results from the simulation. Which results are saved
        depends on the presence of ``e_ops`` and the options used. ``collapse``
        and ``photocurrent`` are available for Monte Carlo simulation results.
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
if len(c_ops) == 0:
return mesolve(H, psi0, tlist, e_ops=e_ops, args=args, options=options)
if isinstance(ntraj, list):
if isinstance(options, dict):
options = SolverOptions(**options)
options = copy(options) or SolverOptions()
options.results['keep_runs_results'] = True
max_ntraj = max(ntraj)
else:
max_ntraj = ntraj
mc = McSolver(H, c_ops, options=options)
result = mc.run(psi0, tlist=tlist, ntraj=max_ntraj, e_ops=e_ops,
seed=seeds, target_tol=target_tol, timeout=timeout)
if isinstance(ntraj, list):
result.traj_batch = ntraj
return result
|
def mcsolve(H, psi0, tlist, c_ops=None, e_ops=None, ntraj=1, *,
args=None, options=None, seeds=None, target_tol=None, timeout=0):
r"""
Monte Carlo evolution of a state vector :math:`|\psi \rangle` for a
given Hamiltonian and sets of collapse operators. Options for the
underlying ODE solver are given by the Options class.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable.
System Hamiltonian as a Qobj, QobjEvo. It can also be any input type that QobjEvo accepts
(see :class:`qutip.QobjEvo`'s documentation). ``H`` can also be a superoperator (liouvillian)
if some collapse operators are to be treated deterministically.
psi0 : :class:`qutip.Qobj`
Initial state vector
tlist : array_like
Times at which results are recorded.
ntraj : int
        Maximum number of trajectories to run. Can be cut short if a time limit
        is passed in options (by default, mcsolve will stop after 1e8 sec)::
            ``options.mcsolve['map_options']['timeout'] = max_sec``
        The run can also stop early once the target tolerance is reached; see ``target_tol``.
c_ops : ``list``
A ``list`` of collapse operators. They must be operators even if ``H``
is a superoperator.
e_ops : ``list``, [optional]
A ``list`` of operator as Qobj, QobjEvo or callable with signature of
(t, state: Qobj) for calculating expectation values. When no ``e_ops``
are given, the solver will default to save the states.
args : dict, [optional]
Arguments for time-dependent Hamiltonian and collapse operator terms.
options : SolverOptions, [optional]
Options for the evolution.
seeds : int, SeedSequence, list, [optional]
        Seed for the random number generator. It can be a single seed used to
        spawn seeds for each trajectory, or a list of seeds, one for each
        trajectory. Seeds are saved in the result and can be reused with::
seeds=prev_result.seeds
target_tol : float, list, [optional]
Target tolerance of the evolution. The evolution will compute
trajectories until the error on the expectation values is lower than
this tolerance. The error is computed using jackknife resampling.
``target_tol`` can be an absolute tolerance, a pair of absolute and
relative tolerance, in that order. Lastly, it can be a list of pairs of
(atol, rtol) for each e_ops.
timeout : float [optional]
        Maximum time for the evolution in seconds. When reached, no more
        trajectories will be computed. Overwrites the option of the same name.
Returns
-------
results : :class:`qutip.solver.Result`
        Object storing all results from the simulation. Which results are saved
        depends on the presence of ``e_ops`` and the options used. ``collapse``
        and ``photocurrent`` are available for Monte Carlo simulation results.
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
if len(c_ops) == 0:
return mesolve(H, psi0, tlist, e_ops=e_ops, args=args, options=options)
if isinstance(ntraj, list):
if isinstance(options, dict):
options = SolverOptions(**options)
options = copy(options) or SolverOptions()
options.results['keep_runs_results'] = True
max_ntraj = max(ntraj)
else:
max_ntraj = ntraj
mc = McSolver(H, c_ops, options=options)
result = mc.run(psi0, tlist=tlist, ntraj=max_ntraj, e_ops=e_ops,
seed=seeds, target_tol=target_tol, timeout=timeout)
if isinstance(ntraj, list):
result.traj_batch = ntraj
return result
|
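A hedged usage sketch for the solver above: a weakly driven two-level system with a single relaxation collapse operator. It assumes a QuTiP build that exposes this ``mcsolve`` signature and the usual operator constructors; attribute names on the result follow the Result class described in the docstring.
import numpy as np
import qutip

H = 2 * np.pi * 0.1 * qutip.sigmax()        # weak coherent drive
psi0 = qutip.basis(2, 0)
tlist = np.linspace(0.0, 10.0, 101)
a = qutip.destroy(2)
c_ops = [np.sqrt(0.05) * a]                 # relaxation at rate 0.05
e_ops = [a.dag() * a]                       # excited-state population

result = qutip.mcsolve(H, psi0, tlist, c_ops=c_ops, e_ops=e_ops, ntraj=250)
print(result.expect[0][:5])                 # averaged expectation values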
28,561 |
def plot_loo_pit(
ax,
figsize,
ecdf,
loo_pit,
loo_pit_ecdf,
unif_ecdf,
p975,
p025,
fill_kwargs,
ecdf_fill,
use_hdi,
x_vals,
hdi_kwargs,
hdi_odds,
n_unif,
unif,
plot_unif_kwargs,
loo_pit_kde,
legend, # pylint: disable=unused-argument
y_hat,
y,
color,
textsize,
credible_interval,
plot_kwargs,
backend_kwargs,
show,
):
"""Bokeh loo pit plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
(figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)
plot_kwargs = {} if plot_kwargs is None else plot_kwargs
plot_kwargs.setdefault("color", to_hex(color))
plot_kwargs.setdefault("linewidth", linewidth * 1.4)
if isinstance(y, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
elif isinstance(y, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
elif isinstance(y_hat, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
elif isinstance(y_hat, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
else:
label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"
plot_kwargs.setdefault("legend_label", label)
plot_unif_kwargs = {} if plot_unif_kwargs is None else plot_unif_kwargs
light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
light_color[1] /= 2 # pylint: disable=unsupported-assignment-operation
light_color[2] += (1 - light_color[2]) / 2 # pylint: disable=unsupported-assignment-operation
plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
plot_unif_kwargs.setdefault("alpha", 0.5)
plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)
if ecdf:
n_data_points = loo_pit.size
plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
if ecdf_fill:
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
fill_kwargs.setdefault("alpha", 0.5)
fill_kwargs.setdefault(
"step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
)
fill_kwargs.setdefault(
"legend_label", "{:.3g}% credible interval".format(credible_interval)
)
elif use_hdi:
if hdi_kwargs is None:
hdi_kwargs = {}
hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
hdi_kwargs.setdefault("alpha", 0.35)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(x_range=(0, 1), **backend_kwargs)
if ecdf:
if plot_kwargs.get("drawstyle") == "steps-mid":
ax.step(
np.hstack((0, loo_pit, 1)),
np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
mode="center",
)
else:
ax.line(
np.hstack((0, loo_pit, 1)),
np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
)
if ecdf_fill:
if fill_kwargs.get("drawstyle") == "steps-mid":
# use step patch when you find out how to do that
ax.patch(
np.concatenate((unif_ecdf, unif_ecdf[::-1])),
np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
fill_color=fill_kwargs.get("color"),
fill_alpha=fill_kwargs.get("alpha", 1.0),
)
else:
ax.patch(
np.concatenate((unif_ecdf, unif_ecdf[::-1])),
np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
fill_color=fill_kwargs.get("color"),
fill_alpha=fill_kwargs.get("alpha", 1.0),
)
else:
if fill_kwargs is not None and fill_kwargs.get("drawstyle") == "steps-mid":
ax.step(
unif_ecdf,
p975 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 1.0),
mode="center",
)
ax.step(
unif_ecdf,
p025 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
mode="center",
)
else:
ax.line(
unif_ecdf,
p975 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
ax.line(
unif_ecdf,
p025 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
else:
if use_hdi:
ax.add_layout(
BoxAnnotation(
bottom=hdi_odds[1],
top=hdi_odds[0],
fill_alpha=hdi_kwargs.pop("alpha"),
fill_color=hdi_kwargs.pop("color"),
**hdi_kwargs
)
)
else:
for idx in range(n_unif):
unif_density, xmin, xmax = _fast_kde(unif[idx, :])
x_s = np.linspace(xmin, xmax, len(unif_density))
ax.line(
x_s,
unif_density,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 0.1),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
ax.line(
x_vals,
loo_pit_kde,
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
)
show_layout(ax, show)
return ax
|
def plot_loo_pit(
ax,
figsize,
ecdf,
loo_pit,
loo_pit_ecdf,
unif_ecdf,
p975,
p025,
fill_kwargs,
ecdf_fill,
use_hdi,
x_vals,
hdi_kwargs,
hdi_odds,
n_unif,
unif,
plot_unif_kwargs,
loo_pit_kde,
legend, # pylint: disable=unused-argument
y_hat,
y,
color,
textsize,
credible_interval,
plot_kwargs,
backend_kwargs,
show,
):
"""Bokeh loo pit plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
(figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)
plot_kwargs = {} if plot_kwargs is None else plot_kwargs
plot_kwargs.setdefault("color", to_hex(color))
plot_kwargs.setdefault("linewidth", linewidth * 1.4)
if isinstance(y, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
elif isinstance(y, DataArray) and y.name is not None:
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
elif isinstance(y_hat, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
elif isinstance(y_hat, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
else:
label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"
plot_kwargs.setdefault("legend_label", label)
plot_unif_kwargs = {} if plot_unif_kwargs is None else plot_unif_kwargs
light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
light_color[1] /= 2 # pylint: disable=unsupported-assignment-operation
light_color[2] += (1 - light_color[2]) / 2 # pylint: disable=unsupported-assignment-operation
plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
plot_unif_kwargs.setdefault("alpha", 0.5)
plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)
if ecdf:
n_data_points = loo_pit.size
plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
if ecdf_fill:
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
fill_kwargs.setdefault("alpha", 0.5)
fill_kwargs.setdefault(
"step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
)
fill_kwargs.setdefault(
"legend_label", "{:.3g}% credible interval".format(credible_interval)
)
elif use_hdi:
if hdi_kwargs is None:
hdi_kwargs = {}
hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
hdi_kwargs.setdefault("alpha", 0.35)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(x_range=(0, 1), **backend_kwargs)
if ecdf:
if plot_kwargs.get("drawstyle") == "steps-mid":
ax.step(
np.hstack((0, loo_pit, 1)),
np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
mode="center",
)
else:
ax.line(
np.hstack((0, loo_pit, 1)),
np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
)
if ecdf_fill:
if fill_kwargs.get("drawstyle") == "steps-mid":
# use step patch when you find out how to do that
ax.patch(
np.concatenate((unif_ecdf, unif_ecdf[::-1])),
np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
fill_color=fill_kwargs.get("color"),
fill_alpha=fill_kwargs.get("alpha", 1.0),
)
else:
ax.patch(
np.concatenate((unif_ecdf, unif_ecdf[::-1])),
np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
fill_color=fill_kwargs.get("color"),
fill_alpha=fill_kwargs.get("alpha", 1.0),
)
else:
if fill_kwargs is not None and fill_kwargs.get("drawstyle") == "steps-mid":
ax.step(
unif_ecdf,
p975 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 1.0),
mode="center",
)
ax.step(
unif_ecdf,
p025 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
mode="center",
)
else:
ax.line(
unif_ecdf,
p975 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
ax.line(
unif_ecdf,
p025 - unif_ecdf,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 1.0),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
else:
if use_hdi:
ax.add_layout(
BoxAnnotation(
bottom=hdi_odds[1],
top=hdi_odds[0],
fill_alpha=hdi_kwargs.pop("alpha"),
fill_color=hdi_kwargs.pop("color"),
**hdi_kwargs
)
)
else:
for idx in range(n_unif):
unif_density, xmin, xmax = _fast_kde(unif[idx, :])
x_s = np.linspace(xmin, xmax, len(unif_density))
ax.line(
x_s,
unif_density,
line_color=plot_unif_kwargs.get("color", "black"),
line_alpha=plot_unif_kwargs.get("alpha", 0.1),
line_width=plot_unif_kwargs.get("linewidth", 1.0),
)
ax.line(
x_vals,
loo_pit_kde,
line_color=plot_kwargs.get("color", "black"),
line_alpha=plot_kwargs.get("alpha", 1.0),
line_width=plot_kwargs.get("linewidth", 3.0),
)
show_layout(ax, show)
return ax
|
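This function is the Bokeh backend; the documented entry point is ``arviz.plot_loo_pit``. A short usage sketch following the ArviZ examples (the ``radon`` sample dataset and its observed variable ``y``):
import arviz as az

idata = az.load_arviz_data("radon")     # example InferenceData with log-likelihood values
ax = az.plot_loo_pit(idata, y="y", ecdf=True, backend="bokeh")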
41,041 |
def _entrate_sp(x, sm_window):
"""
Calculate the entropy rate of a stationary Gaussian random process using
spectrum estimation with smoothing window.
Parameters
----------
x :
sm_window :
Returns
-------
out :
"""
n = x.shape
# Normalize x_sb to be unit variance
x_std = np.std(np.reshape(x, (np.prod(n), 1)))
if x_std < 1e-10:
x_std = 1e-10
x = x / x_std
if (sm_window == 1):
M = [int(i) for i in np.ceil(np.array(n) / 10)]
if (x.ndim >= 3):
parzen_w_3 = np.zeros((2 * n[2] - 1, ))
parzen_w_3[(n[2] - M[2] - 1):(n[2] +
M[2])] = _parzen_win(2 * M[2] + 1)
if (x.ndim >= 2):
parzen_w_2 = np.zeros((2 * n[1] - 1, ))
parzen_w_2[(n[1] - M[1] - 1):(n[1] +
M[1])] = _parzen_win(2 * M[1] + 1)
if (x.ndim >= 1):
parzen_w_1 = np.zeros((2 * n[0] - 1, ))
parzen_w_1[(n[0] - M[0] - 1):(n[0] +
M[0])] = _parzen_win(2 * M[0] + 1)
if x.ndim == 2 and min(n) == 1: # 1D
xc = _autocorr(x)
xc = xc * parzen_w_1
xf = fftshift(fft(xc))
elif x.ndim == 2 and min(n) != 1: # 2D
xc = _autocorr(x) # default option: computes raw correlations with NO
# normalization -- Matlab help on xcorr
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
vd = np.dot(v1.T, v2)
xc = xc / vd
parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T)
xc = xc * parzen_window_2D
xf = fftshift(fft2(xc))
elif x.ndim == 3 and min(n) != 1: # 3D
xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1))
for k in range(n[2] - m3):
temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k])
# default option:
# computes raw correlations with NO normalization
# -- Matlab help on xcorr
xc[:, :, (n[2] - 1) - m3] = temp
xc[:, :, (n[2] - 1) + m3] = temp
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
v3 = np.arange(n[2], 0, -1)
vd = np.dot(v1.T, v2)
vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2]):
vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3]
vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3]
# Possible source of NAN values
xc = xc / vcu
parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T,
parzen_w_2[np.newaxis, :])
parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 - m3])
parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 + m3])
xc = xc * parzen_window_3D
xf = fftshift(fftn(xc))
else:
raise ValueError('Unrecognized matrix dimension.')
xf = abs(xf)
xf[xf < 1e-4] = 1e-4
out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs(
(xf)))) / 2 / _sumN(abs(xf))
return out
|
def _entrate_sp(x, sm_window):
"""
Calculate the entropy rate of a stationary Gaussian random process using
spectrum estimation with smoothing window.
Parameters
----------
x :
sm_window :
Returns
-------
out :
"""
n = x.shape
# Normalize x_sb to be unit variance
x_std = np.std(np.reshape(x, (np.prod(n), 1)))
if x_std < 1e-10:
x_std = 1e-10
x = x / x_std
if (sm_window == 1):
M = [int(i) for i in np.ceil(np.array(n) / 10)]
if (x.ndim >= 3):
parzen_w_3 = np.zeros((2 * n[2] - 1, ))
parzen_w_3[(n[2] - M[2] - 1):(n[2] +
M[2])] = _parzen_win(2 * M[2] + 1)
if x.ndim == 2:
parzen_w_2 = np.zeros((2 * n[1] - 1, ))
parzen_w_2[(n[1] - M[1] - 1):(n[1] +
M[1])] = _parzen_win(2 * M[1] + 1)
if (x.ndim >= 1):
parzen_w_1 = np.zeros((2 * n[0] - 1, ))
parzen_w_1[(n[0] - M[0] - 1):(n[0] +
M[0])] = _parzen_win(2 * M[0] + 1)
if x.ndim == 2 and min(n) == 1: # 1D
xc = _autocorr(x)
xc = xc * parzen_w_1
xf = fftshift(fft(xc))
elif x.ndim == 2 and min(n) != 1: # 2D
xc = _autocorr(x) # default option: computes raw correlations with NO
# normalization -- Matlab help on xcorr
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
vd = np.dot(v1.T, v2)
xc = xc / vd
parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T)
xc = xc * parzen_window_2D
xf = fftshift(fft2(xc))
elif x.ndim == 3 and min(n) != 1: # 3D
xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1))
for k in range(n[2] - m3):
temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k])
# default option:
# computes raw correlations with NO normalization
# -- Matlab help on xcorr
xc[:, :, (n[2] - 1) - m3] = temp
xc[:, :, (n[2] - 1) + m3] = temp
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
v3 = np.arange(n[2], 0, -1)
vd = np.dot(v1.T, v2)
vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2]):
vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3]
vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3]
# Possible source of NAN values
xc = xc / vcu
parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T,
parzen_w_2[np.newaxis, :])
parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 - m3])
parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 + m3])
xc = xc * parzen_window_3D
xf = fftshift(fftn(xc))
else:
raise ValueError('Unrecognized matrix dimension.')
xf = abs(xf)
xf[xf < 1e-4] = 1e-4
out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs(
(xf)))) / 2 / _sumN(abs(xf))
return out
|
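A self-contained 1-D sketch of the estimate above: smooth the sample autocorrelation with a Parzen window, FFT it to get a spectrum estimate, then apply the Gaussian entropy-rate formula. SciPy's ``parzen`` window stands in for the module-private ``_parzen_win`` helper, and ``np.correlate`` for ``_autocorr``.
import numpy as np
from numpy.fft import fft, fftshift
from scipy.signal.windows import parzen

rng = np.random.default_rng(0)
x = rng.normal(size=256)
x = x / np.std(x)                         # unit variance, as in _entrate_sp

n = x.size
M = int(np.ceil(n / 10))
xc = np.correlate(x, x, mode='full')      # raw autocorrelation, length 2n-1
win = np.zeros(2 * n - 1)
win[n - M - 1:n + M] = parzen(2 * M + 1)  # smoothing window centred on lag 0
xf = np.abs(fftshift(fft(xc * win)))
xf[xf < 1e-4] = 1e-4

out = 0.5 * np.log(2 * np.pi * np.e) + np.sum(np.log(xf)) / 2 / np.sum(xf)
print(out)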
57,658 |
def fetch_indicators_command(client, indicator_type, limit: Optional[int] = None):
"""Fetches indicators from the Recorded Future feeds.
Args:
client(Client): Recorded Future Feed client.
indicator_type(str): The indicator type
limit(int): Optional. The number of the indicators to fetch
Returns:
list. List of indicators from the feed
"""
indicators = []
for service in client.services:
iterator = client.build_iterator(service, indicator_type)
for item in itertools.islice(iterator, limit): # if limit is None the iterator will iterate all of the items.
raw_json = dict(item)
raw_json['value'] = value = item.get('Name')
raw_json['type'] = get_indicator_type(indicator_type, item)
score = 0
risk = item.get('Risk')
if isinstance(risk, str) and risk.isdigit():
raw_json['score'] = score = client.calculate_indicator_score(item['Risk'])
raw_json['Criticality Label'] = calculate_recorded_future_criticality_label(item['Risk'])
lower_case_evidence_details_keys = []
evidence_details = json.loads(item.get('EvidenceDetails', '{}')).get('EvidenceDetails', [])
if evidence_details:
raw_json['EvidenceDetails'] = evidence_details
for rule in evidence_details:
rule = dict((k.lower(), v) for k, v in rule.items())
lower_case_evidence_details_keys.append(rule)
risk_string = item.get('RiskString')
if isinstance(risk_string, str):
raw_json['RiskString'] = format_risk_string(risk_string)
indicators.append({
"value": value,
"type": raw_json['type'],
"rawJSON": raw_json,
"fields": {'recordedfutureevidencedetails': lower_case_evidence_details_keys},
"score": score
})
return indicators
|
def fetch_indicators_command(client, indicator_type, limit: Optional[int] = None):
"""Fetches indicators from the Recorded Future feeds.
Args:
client(Client): Recorded Future Feed client.
indicator_type(str): The indicator type
limit(int): Optional. The number of the indicators to fetch
Returns:
list. List of indicators from the feed
"""
indicators = []
for service in client.services:
iterator = client.build_iterator(service, indicator_type)
for item in itertools.islice(iterator, limit): # if limit is None the iterator will iterate all of the items.
raw_json = dict(item)
raw_json['value'] = value = item.get('Name')
raw_json['type'] = get_indicator_type(indicator_type, item)
score = 0
risk = item.get('Risk')
if isinstance(risk, str) and risk.isdigit():
raw_json['score'] = score = client.calculate_indicator_score(risk)
raw_json['Criticality Label'] = calculate_recorded_future_criticality_label(risk)
lower_case_evidence_details_keys = []
evidence_details = json.loads(item.get('EvidenceDetails', '{}')).get('EvidenceDetails', [])
if evidence_details:
raw_json['EvidenceDetails'] = evidence_details
for rule in evidence_details:
rule = dict((k.lower(), v) for k, v in rule.items())
lower_case_evidence_details_keys.append(rule)
risk_string = item.get('RiskString')
if isinstance(risk_string, str):
raw_json['RiskString'] = format_risk_string(risk_string)
indicators.append({
"value": value,
"type": raw_json['type'],
"rawJSON": raw_json,
"fields": {'recordedfutureevidencedetails': lower_case_evidence_details_keys},
"score": score
})
return indicators
|
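To show the shape of the dictionaries this loop emits, a standalone walk-through of one made-up feed row; the indicator type and score are stubbed with constants because ``get_indicator_type`` and the client's ``calculate_indicator_score`` are not reproduced here.
import json

item = {
    'Name': '203.0.113.7',
    'Risk': '89',
    'RiskString': '5/24',
    'EvidenceDetails': json.dumps({'EvidenceDetails': [{'Rule': 'Recent C2', 'Criticality': 4}]}),
}

raw_json = dict(item)
raw_json['value'] = item['Name']
raw_json['type'] = 'IP'        # stub for get_indicator_type(...)
evidence = json.loads(item['EvidenceDetails'])['EvidenceDetails']
fields = [{k.lower(): v for k, v in rule.items()} for rule in evidence]

indicator = {
    'value': raw_json['value'],
    'type': raw_json['type'],
    'rawJSON': raw_json,
    'fields': {'recordedfutureevidencedetails': fields},
    'score': 3,                # stub for client.calculate_indicator_score('89')
}
print(indicator['value'], indicator['score'])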
25,378 |
def get_name() -> str:
name = ""
while len(name) == 0:
print(
"Please enter a name for your instance,"
" it will be used to run your bot from here on out.\n"
"This name is case-sensitive and can only include characters"
" A-z, numbers, underscores, and hyphens."
)
name = input("> ")
if re.fullmatch(r"[a-zA-Z0-9_][a-zA-Z0-9_\-]*", name) is None:
print(
"ERROR: Instance name can only include "
"characters A-z, numbers, underscores, and hyphens.\n"
"They can't start with a hyphen."
)
name = ""
return name
|
def get_name() -> str:
name = ""
while len(name) == 0:
print(
"Please enter a name for your instance,"
" it will be used to run your bot from here on out.\n"
"This name is case-sensitive and can only include characters"
" A-z, numbers, underscores, and hyphens."
)
name = input("> ")
if re.fullmatch(r"[a-zA-Z0-9]*", name) is None:
print(
"ERROR: Instance name can only include "
"characters A-z, numbers, underscores, and hyphens.\n"
"They can't start with a hyphen."
)
name = ""
return name
|
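A quick comparison of the two validation patterns in this pair, to make the behavioural difference concrete:
import re

original_pattern = r"[a-zA-Z0-9_][a-zA-Z0-9_\-]*"   # underscores/hyphens allowed, no leading hyphen
modified_pattern = r"[a-zA-Z0-9]*"                  # alphanumerics only

for candidate in ("my-bot_01", "-bad", "plainbot"):
    print(
        candidate,
        bool(re.fullmatch(original_pattern, candidate)),
        bool(re.fullmatch(modified_pattern, candidate)),
    )
# my-bot_01: True / False, -bad: False / False, plainbot: True / True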
41,890 |
def _generate_contour_subplot(
trials: List[FrozenTrial],
x_param: str,
y_param: str,
direction: StudyDirection,
param_values_range: dict,
) -> Tuple["Contour", "Scatter"]:
x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params}))
y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params}))
if len(x_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(x_param))
return go.Contour(), go.Scatter()
if len(y_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(y_param))
return go.Contour(), go.Scatter()
x_range = param_values_range[x_param]
x_indices = [x_range[0]] + x_indices + [x_range[1]]
y_range = param_values_range[y_param]
y_indices = [y_range[0]] + y_indices + [y_range[1]]
z = [[float("nan") for _ in range(len(x_indices))] for _ in range(len(y_indices))]
x_values = []
y_values = []
for trial in trials:
if x_param not in trial.params or y_param not in trial.params:
continue
x_values.append(trial.params[x_param])
y_values.append(trial.params[y_param])
x_i = x_indices.index(trial.params[x_param])
y_i = y_indices.index(trial.params[y_param])
if isinstance(trial.value, int):
value = float(trial.value)
elif isinstance(trial.value, float):
value = trial.value
else:
raise ValueError(
"Trial{} has COMPLETE state, but its value is non-numeric.".format(trial.number)
)
z[y_i][x_i] = value
# TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed.
    # If contours_coloring='heatmap' is specified, reversescale argument of go.Contour does not
# work correctly. See https://github.com/pfnet/optuna/issues/606.
colorscale = plotly.colors.PLOTLY_SCALES["Blues"]
if direction == StudyDirection.MINIMIZE:
colorscale = [[1 - t[0], t[1]] for t in colorscale]
colorscale.reverse()
contour = go.Contour(
x=x_indices,
y=y_indices,
z=z,
colorbar={"title": "Objective Value"},
colorscale=colorscale,
connectgaps=True,
contours_coloring="heatmap",
hoverinfo="none",
line_smoothing=1.3,
)
scatter = go.Scatter(
x=x_values,
y=y_values,
marker={"line": {"width": 0.5, "color": "Grey"}, "color": "black"},
mode="markers",
showlegend=False,
)
return (contour, scatter)
|
def _generate_contour_subplot(
trials: List[FrozenTrial],
x_param: str,
y_param: str,
direction: StudyDirection,
param_values_range: Optional[Dict[str, Tuple[float, float]]] = None,
) -> Tuple["Contour", "Scatter"]:
x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params}))
y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params}))
if len(x_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(x_param))
return go.Contour(), go.Scatter()
if len(y_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(y_param))
return go.Contour(), go.Scatter()
x_range = param_values_range[x_param]
x_indices = [x_range[0]] + x_indices + [x_range[1]]
y_range = param_values_range[y_param]
y_indices = [y_range[0]] + y_indices + [y_range[1]]
z = [[float("nan") for _ in range(len(x_indices))] for _ in range(len(y_indices))]
x_values = []
y_values = []
for trial in trials:
if x_param not in trial.params or y_param not in trial.params:
continue
x_values.append(trial.params[x_param])
y_values.append(trial.params[y_param])
x_i = x_indices.index(trial.params[x_param])
y_i = y_indices.index(trial.params[y_param])
if isinstance(trial.value, int):
value = float(trial.value)
elif isinstance(trial.value, float):
value = trial.value
else:
raise ValueError(
"Trial{} has COMPLETE state, but its value is non-numeric.".format(trial.number)
)
z[y_i][x_i] = value
# TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed.
    # If contours_coloring='heatmap' is specified, reversescale argument of go.Contour does not
# work correctly. See https://github.com/pfnet/optuna/issues/606.
colorscale = plotly.colors.PLOTLY_SCALES["Blues"]
if direction == StudyDirection.MINIMIZE:
colorscale = [[1 - t[0], t[1]] for t in colorscale]
colorscale.reverse()
contour = go.Contour(
x=x_indices,
y=y_indices,
z=z,
colorbar={"title": "Objective Value"},
colorscale=colorscale,
connectgaps=True,
contours_coloring="heatmap",
hoverinfo="none",
line_smoothing=1.3,
)
scatter = go.Scatter(
x=x_values,
y=y_values,
marker={"line": {"width": 0.5, "color": "Grey"}, "color": "black"},
mode="markers",
showlegend=False,
)
return (contour, scatter)
|
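Both versions of _generate_contour_subplot pad the sorted unique parameter values with the overall range endpoints so the contour extends to the plot edges. A toy sketch of just that padding step, with made-up numbers:

# Sketch: extend the axis values with the parameter's overall range (values are made up).
x_indices = sorted({0.2, 0.5, 0.9})   # unique values observed in trials
x_range = (0.0, 1.0)                  # assumed overall range for the parameter
x_indices = [x_range[0]] + x_indices + [x_range[1]]
print(x_indices)  # [0.0, 0.2, 0.5, 0.9, 1.0]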
5,506 |
def test_extract_complex_social(complex_user, client):
html = client.get(complex_user.get_absolute_url(), follow=True).content
source = UserSource(complex_user.username, social=True)
data = source.extract_data(html)
expected_data = {
"username": "JillDeveloper",
"fullname": "Jill Developer",
"title": "Web Developer",
"organization": "Acme, Inc.",
"location": "Springfield, USA",
"twitter_url": "http://twitter.com/jilldev1999",
"github_url": "https://github.com/jilldev1999",
"stackoverflow_url": "http://stackoverflow.com/users/1/jilldev1999",
"linkedin_url": "http://www.linkedin.com/in/jilldev1999",
"pmo_url": "http://people.mozilla.org/u/jilldev/",
"date_joined": datetime(1999, 1, 1, 10, 40, 23),
}
assert data == expected_data
|
def test_extract_complex_social(complex_user, client):
html = client.get(complex_user.get_absolute_url(), follow=True).content
source = UserSource(complex_user.username, social=True)
data = source.extract_data(html)
expected_data = {
"username": "JillDeveloper",
"fullname": "Jill Developer",
"title": "Web Developer",
"organization": "Acme, Inc.",
"location": "Springfield, USA",
"twitter_url": "http://twitter.com/jilldev1999",
"github_url": "https://github.com/jilldev1999",
"stackoverflow_url": "http://stackoverflow.com/users/1/jilldev1999",
"linkedin_url": "http://www.linkedin.com/in/jilldev1999",
"pmo_url": "https://people.mozilla.org/p/jilldev/",
"date_joined": datetime(1999, 1, 1, 10, 40, 23),
}
assert data == expected_data
|
58,524 |
def allreduce_multigpu(tensor_list: list,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Collective allrecue a list of tensors across the group.
Args:
tensor_list (List[tensor]): list of tensors to be allreduced,
each on a GPU.
group_name (str): the collective group name to perform allreduce.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
opts = types.AllReduceOptions
opts.reduceOp = op
g.allreduce(tensor_list, opts)
|
def allreduce_multigpu(tensor_list: list,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Collective allrecue a list of tensors across the group.
Args:
tensor_list (List[tensor]): list of tensors to be allreduced,
each on a GPU.
group_name (str): the collective group name to perform allreduce.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
opts = types.AllReduceOptions
opts.reduceOp = op
g.allreduce(tensor_list, opts)
|
24,848 |
def my_func(self): # [missing-return-type-doc]
"""This is a docstring.
:returns: Always False
"""
return False
|
def my_func(self): # [missing-return-type-doc]
"""Warn partial sphinx returns
:returns: Always False
"""
return False
|
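The missing-return-type-doc warning fires because the docstring documents the return value but not its type. Assuming standard Sphinx fields, one way to satisfy the checker is to add an :rtype: entry; this is a sketch, not the project's actual fix:

def my_func(self):
    """This is a docstring.

    :returns: Always False
    :rtype: bool
    """
    return False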
36,280 |
def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000, progress_callback=None, active_reset=False,
symmetrize_readout: Optional[Union[str, int]] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
        settings is run with arguments ``f(i, len(tomo_experiment))`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to ``|0>`` naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: the level of readout symmetrization to perform for the estimation
and optional calibration of each observable. Specifying the string `exhaustive` is
equivalent to -1 below, whereas None is equivalent to 0 below. The following integer
levels are currently supported:
* -1 -- exhaustive symmetrization uses every possible combination of flips
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
Note that (default) exhaustive symmetrization requires a number of QPU calls exponential in
the number of qubits in the union of the support of the observables in any group of settings
in tomo_expt; the number of shots may need to be increased to accommodate this.
see :func:`run_symmetrized_readout` in api._quantum_computer for more information.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
symm_type = 0
if symmetrize_readout is not None:
if symmetrize_readout == 'exhaustive':
symm_type = -1
elif symmetrize_readout in [-1, 0, 1, 2, 3]:
symm_type = symmetrize_readout
else:
raise ValueError("Readout symmetrization must be an int from -1 to 3 inclusive.")
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symm_type != -1:
raise ValueError("Readout calibration only currently works with exhaustive readout "
"symmetrization turned on.")
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, settings in enumerate(tomo_experiment):
log.info(f"Collecting bitstrings for the {len(settings)} settings: {settings}")
# 1.1 Prepare a state according to the amalgam of all setting.in_state
total_prog = Program()
if active_reset:
total_prog += RESET()
max_weight_in_state = _max_weight_state(setting.in_state for setting in settings)
for oneq_state in max_weight_in_state.states:
total_prog += _one_q_state_prep(oneq_state)
# 1.2 Add in the program
total_prog += tomo_experiment.program
# 1.3 Measure the state according to setting.out_operator
max_weight_out_op = _max_weight_operator(setting.out_operator for setting in settings)
for qubit, op_str in max_weight_out_op:
total_prog += _local_pauli_eig_meas(op_str, qubit)
qubits = max_weight_out_op.get_qubits()
# we don't need to do any actual measurement if the combined operator is simply the
# identity, i.e. weight=0. We handle this specially below.
if len(qubits) > 0:
bitstrings = qc.run_symmetrized_readout(total_prog, n_shots, symm_type, qubits)
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings,
{q: idx for idx, q in enumerate(qubits)},
setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
calibr_qubs = setting.out_operator.get_qubits()
calibr_qub_dict = {q: idx for idx, q in enumerate(calibr_qubs)}
# 4.2 Perform symmetrization on the calibration program
calibr_results = qc.run_symmetrized_readout(calibr_prog, n_shots, -1, calibr_qubs)
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results,
calibr_qub_dict,
setting, n_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=len(bitstrings),
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=len(calibr_results),
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=len(bitstrings),
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
|
def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000, progress_callback=None, active_reset=False,
symmetrize_readout: Optional[Union[str, int]] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
        settings is run with arguments ``f(i, len(tomo_experiment))`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to ``|0>`` naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: the level of readout symmetrization to perform for the estimation
and optional calibration of each observable. Specifying the string `exhaustive` is
equivalent to -1 below, whereas None is equivalent to 0 below. The following integer
levels are currently supported:
* -1 -- exhaustive symmetrization uses every possible combination of flips
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
Note that (default) exhaustive symmetrization requires a number of QPU calls exponential in
the number of qubits in the union of the support of the observables in any group of settings
in ``tomo_experiment``; the number of shots may need to be increased to accommodate this.
see :func:`run_symmetrized_readout` in api._quantum_computer for more information.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
symm_type = 0
if symmetrize_readout is not None:
if symmetrize_readout == 'exhaustive':
symm_type = -1
elif symmetrize_readout in [-1, 0, 1, 2, 3]:
symm_type = symmetrize_readout
else:
raise ValueError("Readout symmetrization must be an int from -1 to 3 inclusive.")
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symm_type != -1:
raise ValueError("Readout calibration only currently works with exhaustive readout "
"symmetrization turned on.")
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, settings in enumerate(tomo_experiment):
log.info(f"Collecting bitstrings for the {len(settings)} settings: {settings}")
# 1.1 Prepare a state according to the amalgam of all setting.in_state
total_prog = Program()
if active_reset:
total_prog += RESET()
max_weight_in_state = _max_weight_state(setting.in_state for setting in settings)
for oneq_state in max_weight_in_state.states:
total_prog += _one_q_state_prep(oneq_state)
# 1.2 Add in the program
total_prog += tomo_experiment.program
# 1.3 Measure the state according to setting.out_operator
max_weight_out_op = _max_weight_operator(setting.out_operator for setting in settings)
for qubit, op_str in max_weight_out_op:
total_prog += _local_pauli_eig_meas(op_str, qubit)
qubits = max_weight_out_op.get_qubits()
# we don't need to do any actual measurement if the combined operator is simply the
# identity, i.e. weight=0. We handle this specially below.
if len(qubits) > 0:
bitstrings = qc.run_symmetrized_readout(total_prog, n_shots, symm_type, qubits)
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings,
{q: idx for idx, q in enumerate(qubits)},
setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
calibr_qubs = setting.out_operator.get_qubits()
calibr_qub_dict = {q: idx for idx, q in enumerate(calibr_qubs)}
# 4.2 Perform symmetrization on the calibration program
calibr_results = qc.run_symmetrized_readout(calibr_prog, n_shots, -1, calibr_qubs)
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results,
calibr_qub_dict,
setting, n_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=len(bitstrings),
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=len(calibr_results),
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=len(bitstrings),
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
|
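The docstring of measure_observables states that 'exhaustive' is equivalent to -1 and None to 0 before the integer levels are validated. A standalone sketch of just that mapping, extracted from the function body above:

# Sketch of the symmetrize_readout -> symm_type mapping used above.
def to_symm_type(symmetrize_readout):
    if symmetrize_readout is None:
        return 0
    if symmetrize_readout == 'exhaustive':
        return -1
    if symmetrize_readout in [-1, 0, 1, 2, 3]:
        return symmetrize_readout
    raise ValueError("Readout symmetrization must be an int from -1 to 3 inclusive.")

print(to_symm_type('exhaustive'), to_symm_type(None), to_symm_type(2))  # -1 0 2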
5,415 |
def test_random():
# make sure the right liberty is used for random
if HAS_CYPTODOME:
assert CryptodomeRandom is salt.utils.crypt.Random
elif HAS_CRYPTO:
assert CryptoRandom is salt.utils.crypt.Random
|
def test_random():
# make sure the right library is used for random
if HAS_CYPTODOME:
assert CryptodomeRandom is salt.utils.crypt.Random
elif HAS_CRYPTO:
assert CryptoRandom is salt.utils.crypt.Random
|
28,052 |
def convert(reports: List[Report]) -> List[str]:
""" Convert the given reports to CodeChecker baseline format.
Returns a list of sorted unique report hashes.
"""
return sorted(set([r.report_hash for r in reports]))
|
def convert(reports: Iterable[Report]) -> List[str]:
""" Convert the given reports to CodeChecker baseline format.
Returns a list of sorted unique report hashes.
"""
return sorted(set([r.report_hash for r in reports]))
|
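Both versions of convert reduce a collection of reports to a sorted list of unique hashes; only the accepted input annotation changes. A tiny sketch with a stand-in Report type (not the real CodeChecker class):

from collections import namedtuple

Report = namedtuple("Report", "report_hash")        # stand-in, not the CodeChecker type
reports = [Report("b"), Report("a"), Report("b")]
print(sorted(set(r.report_hash for r in reports)))  # ['a', 'b']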
33,530 |
def test_stack_time_attributes(cfn_client, deploy_cfn_template):
bucket_name = "test"
deployed = deploy_cfn_template(
template_path=os.path.join(os.path.dirname(__file__), "../templates/template5.yaml"),
parameters={"LocalParam": bucket_name},
)
stack_name = deployed.stack_name
stack_id = deployed.stack_id
assert "CreationTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
cfn_client.update_stack(
StackName=stack_name,
TemplateBody=load_template_raw("template5.yaml"),
Parameters=[{"ParameterKey": "CustomTag", "ParameterValue": bucket_name}],
)
def wait_stack_done():
return cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]["StackStatus"] in [
"CREATE_COMPLETE",
"UPDATE_COMPLETE",
]
assert wait_until(wait_stack_done)
assert "LastUpdatedTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
cfn_client.delete_stack(
StackName=stack_name,
)
assert "DeletionTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
|
def test_stack_time_attributes(cfn_client, deploy_cfn_template):
bucket_name = "test"
deployed = deploy_cfn_template(
template_path=os.path.join(os.path.dirname(__file__), "../templates/template5.yaml"),
parameters={"LocalParam": bucket_name},
)
stack_name = deployed.stack_name
stack_id = deployed.stack_id
assert "CreationTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
cfn_client.update_stack(
StackName=stack_name,
TemplateBody=load_template_file(os.path.join(os.path.dirname(__file__), "../templates/template5.yaml")),
Parameters=[{"ParameterKey": "CustomTag", "ParameterValue": bucket_name}],
)
def wait_stack_done():
return cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]["StackStatus"] in [
"CREATE_COMPLETE",
"UPDATE_COMPLETE",
]
assert wait_until(wait_stack_done)
assert "LastUpdatedTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
cfn_client.delete_stack(
StackName=stack_name,
)
assert "DeletionTime" in cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
|
49,575 |
def setitem_array(out_name, array, indices, value):
"""Master function for array assignment.
This function, that is intended to be called by
`Array.__setitem__`, creates a new dask that assigns values to
each block that is touched by the indices, leaving other blocks
unchanged.
Each block that overlaps the indices is assigned from the
    appropriate part of the assignment value. The dasks of these value
parts are included in the output dask dictionary, as are the dasks
of any 1-d dask array indices. This ensures that the dask array
assignment value and any dask array indices are not computed until
the `Array.__setitem__` operation is computed.
    The part of the assignment value that applies to each block is created as a
    "getitem" slice of the full assignment value.
Parameters
----------
out_name : `str`
The dask variable output name.
array : dask array
The dask array that is being assigned to.
indices : numpy-style indices
Indices to array defining the elements to be assigned.
value : dask array
The assignment value, i.e. the values which will be assigned
to elements of array.
Returns
-------
dsk : `dict`
A dictionary where the keys are new unique tokens for each
block of the form
(out_name, dim_index[, dim_index[, ...]])
and the values are either
(key,)
or
(setitem, key, v_key, block_indices)
where key is an existing top-level dask key of array.
The first case occurs when the block represented by key does
not overlap the indices.
The second case occurs when the block represented by key does
overlap the indices. setitem is the chunk assignment function;
        v_key is the dask key of the part of the assignment value
        that corresponds to the block; and block_indices are the
        assignment indices that apply to the block.
        The dictionary also includes any additional key/value pairs
        needed to define v_key, as well as any additional
key/value pairs needed to define dask keys contained in the
block_indices list as references to dask array indices.
"""
@functools.lru_cache()
def block_index_from_1d_index(dim, loc0, loc1, is_bool):
"""The positions of index elements in the range values loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller. It is assumed that negative elements
of an integer array have already been posified.
The non-hashable dsk is the output dask dictionary that is
defined in the namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
is_bool : `bool`
Whether or not the index is of boolean data type.
Returns
-------
numpy array or `str`
If index is a numpy array then a numpy array is
returned.
If index is a dask array then the dask of the block index
is inserted into the output dask dictionary, and its
unique top-layer key is returned.
"""
if is_bool:
# Boolean array (dask or numpy)
i = index[loc0:loc1]
elif is_dask_collection(index):
# Integer dask array
#
# Check for values in [loc0,loc1).
#
# Use the 3-argument "where" to insert place-holder
# elements that will be searched for and removed in the
# `setitem` function at compute time. The place-holder
# value must be the size of the block, i.e. loc1-loc0. We
# can't use a 1-argument "where" here because that won't
# work if index has unknown chunk sizes.
i = np.where((loc0 <= index) & (index < loc1), index, loc1)
i -= loc0
else:
# Integer numpy array
#
# Check for positive values in [loc0,loc1).
i = np.where((loc0 <= index) & (index < loc1))[0]
i = index[i] - loc0
if is_dask_collection(i):
            # Return dask key instead of dask array
i = concatenate_array_chunks(i)
dsk.update(dict(i.dask))
i = next(flatten(i.__dask_keys__()))
return i
@functools.lru_cache()
def block_index_shape_from_1d_bool_index(dim, loc0, loc1):
"""Number of True index elements between positions loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
return np.sum(index[loc0:loc1])
@functools.lru_cache()
def n_preceeding_from_1d_bool_index(dim, loc0):
"""Number of True index elements preceeding position loc0.
The index is the input assignment index that is defined in the
namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
return np.sum(index[:loc0])
@functools.lru_cache()
def value_indices_from_1d_int_index(dim, vsize, loc0, loc1):
"""Value indices for index elements between loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller. It is assumed that negative elements
have already been posified.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
vsize : `int`
The full size of the dimension of the assignment value.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
# Check for values in [loc0,loc1)
if is_dask_collection(index):
if math.isnan(index.size):
# Integer dask array with unknown size.
#
# The 1-argument "where" won't work, so use the
# 3-argument "where" and convert to a boolean
# array. We concatenate the resulting boolean index
# and set the chunk size (which must be the full size
# of the dimension of the assignment value) which
# allows the returned array to be used as a
# __getitem__ index to the assignment value.
i = np.where((loc0 <= index) & (index < loc1), True, False)
i = concatenate_array_chunks(i)
i._chunks = ((vsize,),)
else:
# Integer dask array with known size
i = np.where((loc0 <= index) & (index < loc1))[0]
i = concatenate_array_chunks(i)
else:
# Integer numpy array.
i = np.where((loc0 <= index) & (index < loc1))[0]
return i
from ..core import flatten
array_shape = array.shape
value_shape = value.shape
value_ndim = len(value_shape)
# Reformat input indices
indices, indices_shape, reverse = parse_assignment_indices(indices, array_shape)
# Empty slices can only be assigned size 1 values
if 0 in indices_shape and value_shape and max(value_shape) > 1:
raise ValueError(
f"shape mismatch: value array of shape {value_shape} "
"could not be broadcast to indexing result "
f"of shape {tuple(indices_shape)}"
)
# Set variables needed when creating the part of the assignment
# value that applies to each block.
#
# offset: The additive offset to the assignment value dimension
# positions that results in the positions of the
# corresponding dimensions in the array. offset is a
# non-negative integer, and a positive value means that
# the array has more dimensions than the assignment
# value.
#
# value_offset: The additive offset to the array dimension
# positions that results in the positions of the
# corresponding dimensions in the assignment
# value. value_offset is a non-negative integer,
# and a positive value means that the assignment
# value has more dimensions than the array.
#
# For example:
#
# array.shape value.shape offset value_offset
# ------------ ------------ ------ ------------
# (3, 4) (3, 4) 0 0
# (1, 1, 3, 4) (3, 4) 2 0
# (3, 4) (1, 1, 3, 4) 0 2
# ------------ ------------ ------ ------------
#
# array_common_shape: The shape of those dimensions of array
# which correspond to dimensions of the
# assignment value.
#
# value_common_shape: The shape of those dimensions of the
# assignment value which correspond to
# dimensions of the array.
#
# base_value_indices: The indices used for initialising the
# selection of the part of the assignment
# value that applies to each block of
# array. An element of `None` will end up
# being replaced by an appropriate slice on a
# block-by-block basis.
#
# non_broadcast_dimensions: The integer positions of
# array_common_shape which do not
# correspond to broadcast dimensions in
# the assignment value.
#
# Note that array_common_shape and value_common_shape may be
    # different if there are any size 1 dimensions being broadcast.
offset = len(indices_shape) - value_ndim
if offset >= 0:
# The array has the same number or more dimensions than the
# assignment value
array_common_shape = indices_shape[offset:]
value_common_shape = value_shape
value_offset = 0
reverse = [i - offset for i in reverse if i >= offset]
else:
        # The assignment value has more dimensions than the array
value_offset = -offset
array_common_shape = indices_shape
value_common_shape = value_shape[value_offset:]
offset = 0
# All of the extra leading dimensions must have size 1
if value_shape[:value_offset] != (1,) * value_offset:
raise ValueError(
"could not broadcast input array from shape"
f"{value_shape} into shape {tuple(indices_shape)}"
)
base_value_indices = []
non_broadcast_dimensions = []
for i, (a, b) in enumerate(zip(array_common_shape, value_common_shape)):
if b == 1:
base_value_indices.append(slice(None))
elif a == b:
base_value_indices.append(None)
non_broadcast_dimensions.append(i)
elif math.isnan(a):
base_value_indices.append(None)
non_broadcast_dimensions.append(i)
else:
# Can't check ...
raise ValueError(
f"Can't broadcast data with shape {value_common_shape} "
f"across shape {tuple(indices_shape)}"
)
# Translate chunks tuple to a set of array locations in product
# order
chunks = array.chunks
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
array_locations = [
[(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
array_locations = product(*array_locations)
# Get the dask keys of the most recent layer in the same order as
# the array locations.
in_keys = list(flatten(array.__dask_keys__()))
# Create a new "setitem" dask entry for each block in the array
dsk = {}
out_name = (out_name,)
for in_key, locations in zip(in_keys, array_locations):
# Now loop round each block dimension.
#
# If the block overlaps the indices then set the following
# (which will be used to define a new dask entry):
#
# block_indices: The indices that will be used to assign to
# this block.
#
# block_indices_shape: The shape implied by block_indices.
#
# block_preceeding_sizes: How many assigned elements precede
# this block along each dimension that
# doesn't have an integer. It is
# assumed that a slice will have a
# positive step, as will be the case
# for reformatted indices. `None` is
# used for dimensions with 1-d integer
# arrays.
block_indices = []
block_indices_shape = []
block_preceeding_sizes = []
local_offset = offset
# Assume, until demonstrated otherwise, that this block
# overlaps the assignment indices.
overlaps = True
# Note which dimension, if any, has 1-d integer array index
dim_1d_int_index = None
j = -1
for dim, (index, full_size, (loc0, loc1)) in enumerate(
zip(
indices,
array_shape,
locations,
)
):
j += 1
integer_index = isinstance(index, int)
if isinstance(index, slice):
# Index is a slice
stop = loc1 - loc0
if index.stop < loc1:
stop -= loc1 - index.stop
start = index.start - loc0
if start < 0:
# Make start positive
start %= index.step
if start >= stop:
# This block does not overlap the slice index
overlaps = False
break
step = index.step
block_index = slice(start, stop, step)
block_index_size, rem = divmod(stop - start, step)
if rem:
block_index_size += 1
pre = index.indices(loc0)
n_preceeding, rem = divmod(pre[1] - pre[0], step)
if rem:
n_preceeding += 1
elif integer_index:
# Index is an integer
local_offset += 1
if not loc0 <= index < loc1:
# This block does not overlap the integer index
overlaps = False
break
block_index = index - loc0
else:
# Index is a 1-d array
is_bool = index.dtype == bool
block_index = block_index_from_1d_index(dim, loc0, loc1, is_bool)
if is_bool:
block_index_size = block_index_shape_from_1d_bool_index(
dim, loc0, loc1
)
n_preceeding = n_preceeding_from_1d_bool_index(dim, loc0)
else:
block_index_size = None
n_preceeding = None
dim_1d_int_index = dim
loc0_loc1 = loc0, loc1
if not is_dask_collection(index) and not block_index.size:
# This block does not overlap the 1-d numpy array
# index
overlaps = False
break
# Note: When the 1-d array index is a dask array then
# we can't tell if this block overlaps it, so we
# assume that it does. If it in fact doesn't
# overlap then the part of the assignment value
            #       that corresponds to this block will have zero
# size which, at compute time, will indicate to
# the `setitem` function to pass the block
# through unchanged.
# Still here? This block overlaps the index for this
# dimension.
block_indices.append(block_index)
if not integer_index:
block_indices_shape.append(block_index_size)
block_preceeding_sizes.append(n_preceeding)
# The new dask key
out_key = out_name + in_key[1:]
if not overlaps:
# This block does not overlap the indices for all
# dimensions, so pass the block through unchanged.
dsk[out_key] = in_key
continue
# Still here? Then this block overlaps the indices for all
# dimensions and so needs to have some of its elements
# assigned.
# Initialise the indices of the assignment value that define
# the parts of it which are to be assigned to this block
value_indices = base_value_indices[:]
for i in non_broadcast_dimensions:
j = i + offset
if j == dim_1d_int_index:
# Index is a 1-d integer array
#
# Define index in the current namespace for use in
# `value_indices_from_1d_int_index`
index = indices[j]
value_indices[i] = value_indices_from_1d_int_index(
dim_1d_int_index,
value_shape[i + value_offset],
*loc0_loc1,
)
else:
# Index is a slice or 1-d boolean array
start = block_preceeding_sizes[j]
value_indices[i] = slice(start, start + block_indices_shape[j])
# If required as a consequence of reformatting any slice
# objects of the original indices to have a positive steps,
# reverse the indices to assignment value.
for i in reverse:
size = value_common_shape[i]
start, stop, step = value_indices[i].indices(size)
size -= 1
start = size - start
stop = size - stop
if stop < 0:
stop = None
value_indices[i] = slice(start, stop, -1)
if value_ndim > len(indices):
# The assignment value has more dimensions than array, so
# add a leading Ellipsis to the indices of value.
value_indices.insert(0, Ellipsis)
# Create the part of the full assignment value that is to be
# assigned to elements of this block and make sure that it has
# just one chunk (so we can represent it with a single key in
# the argument list of setitem).
v = value[tuple(value_indices)]
v = concatenate_array_chunks(v)
v_key = next(flatten(v.__dask_keys__()))
# Insert into the output dask dictionary the dask of the part
# of assignment value for this block (not minding when we
# overwrite any existing keys as the values will be the same).
dsk = merge(dict(v.dask), dsk)
# Define the assignment function for this block.
dsk[out_key] = (setitem, in_key, v_key, block_indices)
block_index_from_1d_index.cache_clear()
block_index_shape_from_1d_bool_index.cache_clear()
n_preceeding_from_1d_bool_index.cache_clear()
value_indices_from_1d_int_index.cache_clear()
return dsk
|
def setitem_array(out_name, array, indices, value):
"""Master function for array assignment.
This function, that is intended to be called by
`Array.__setitem__`, creates a new dask that assigns values to
each block that is touched by the indices, leaving other blocks
unchanged.
Each block that overlaps the indices is assigned from the
    appropriate part of the assignment value. The dasks of these value
parts are included in the output dask dictionary, as are the dasks
of any 1-d dask array indices. This ensures that the dask array
assignment value and any dask array indices are not computed until
the `Array.__setitem__` operation is computed.
    The part of the assignment value that applies to each block is created as a
    "getitem" slice of the full assignment value.
Parameters
----------
out_name : `str`
The dask variable output name.
array : dask array
The dask array that is being assigned to.
indices : numpy-style indices
Indices to array defining the elements to be assigned.
value : dask array
The assignment value, i.e. the values which will be assigned
to elements of array.
Returns
-------
dsk : `dict`
A dictionary where the keys are new unique tokens for each
block of the form
(out_name, dim_index[, dim_index[, ...]])
and the values are either
(key,)
or
(setitem, key, v_key, block_indices)
where key is an existing top-level dask key of array.
The first case occurs when the block represented by key does
not overlap the indices.
The second case occurs when the block represented by key does
overlap the indices. setitem is the chunk assignment function;
        v_key is the dask key of the part of the assignment value
        that corresponds to the block; and block_indices are the
        assignment indices that apply to the block.
        The dictionary also includes any additional key/value pairs
        needed to define v_key, as well as any additional
key/value pairs needed to define dask keys contained in the
block_indices list as references to dask array indices.
"""
@functools.lru_cache()
def block_index_from_1d_index(dim, loc0, loc1, is_bool):
"""The positions of index elements in the range values loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller. It is assumed that negative elements
of an integer array have already been posified.
The non-hashable dsk is the output dask dictionary that is
defined in the namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
is_bool : `bool`
Whether or not the index is of boolean data type.
Returns
-------
numpy array or `str`
If index is a numpy array then a numpy array is
returned.
If index is a dask array then the dask of the block index
is inserted into the output dask dictionary, and its
unique top-layer key is returned.
"""
if is_bool:
# Boolean array (dask or numpy)
i = index[loc0:loc1]
elif is_dask_collection(index):
# Integer dask array
#
# Check for values in [loc0,loc1).
#
# Use the 3-argument "where" to insert place-holder
# elements that will be searched for and removed in the
# `setitem` function at compute time. The place-holder
# value must be the size of the block, i.e. loc1-loc0. We
# can't use a 1-argument "where" here because that won't
# work if index has unknown chunk sizes.
i = np.where((loc0 <= index) & (index < loc1), index, loc1)
i -= loc0
else:
# Integer numpy array
#
# Check for positive values in [loc0,loc1).
i = np.where((loc0 <= index) & (index < loc1))[0]
i = index[i] - loc0
if is_dask_collection(i):
            # Return dask key instead of dask array
i = concatenate_array_chunks(i)
dsk.update(dict(i.dask))
i = next(flatten(i.__dask_keys__()))
return i
@functools.lru_cache()
def block_index_shape_from_1d_bool_index(dim, loc0, loc1):
"""Number of True index elements between positions loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
return np.sum(index[loc0:loc1])
@functools.lru_cache()
def n_preceeding_from_1d_bool_index(dim, loc0):
"""Number of True index elements preceeding position loc0.
The index is the input assignment index that is defined in the
namespace of the caller.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
loc0 : `int`
The start index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
return np.sum(index[:loc0])
@functools.lru_cache()
def value_indices_from_1d_int_index(dim, vsize, loc0, loc1):
"""Value indices for index elements between loc0 and loc1.
The index is the input assignment index that is defined in the
namespace of the caller. It is assumed that negative elements
have already been posified.
Parameters
----------
dim : `int`
The dimension position of the index that is used as a proxy
for the non-hashable index to define the LRU cache key.
vsize : `int`
The full size of the dimension of the assignment value.
loc0 : `int`
The start index of the block along the dimension.
loc1 : `int`
The stop index of the block along the dimension.
Returns
-------
numpy array or dask array
If index is a numpy array then a numpy array is
returned.
If index is dask array then a dask array is returned.
"""
# Check for values in [loc0,loc1)
if is_dask_collection(index):
if math.isnan(index.size):
# Integer dask array with unknown size.
#
# The 1-argument "where" won't work, so use the
# 3-argument "where" and convert to a boolean
# array. We concatenate the resulting boolean index
# and set the chunk size (which must be the full size
# of the dimension of the assignment value) which
# allows the returned array to be used as a
# __getitem__ index to the assignment value.
i = np.where((loc0 <= index) & (index < loc1), True, False)
i = concatenate_array_chunks(i)
i._chunks = ((vsize,),)
else:
# Integer dask array with known size
i = np.where((loc0 <= index) & (index < loc1))[0]
i = concatenate_array_chunks(i)
else:
# Integer numpy array.
i = np.where((loc0 <= index) & (index < loc1))[0]
return i
from ..core import flatten
array_shape = array.shape
value_shape = value.shape
value_ndim = len(value_shape)
# Reformat input indices
indices, indices_shape, reverse = parse_assignment_indices(indices, array_shape)
# Empty slices can only be assigned size 1 values
if 0 in indices_shape and value_shape and max(value_shape) > 1:
raise ValueError(
f"shape mismatch: value array of shape {value_shape} "
"could not be broadcast to indexing result "
f"of shape {tuple(indices_shape)}"
)
# Set variables needed when creating the part of the assignment
# value that applies to each block.
#
# offset: The additive offset to the assignment value dimension
# positions that results in the positions of the
# corresponding dimensions in the array. offset is a
# non-negative integer, and a positive value means that
# the array has more dimensions than the assignment
# value.
#
# value_offset: The additive offset to the array dimension
# positions that results in the positions of the
# corresponding dimensions in the assignment
# value. value_offset is a non-negative integer,
# and a positive value means that the assignment
# value has more dimensions than the array.
#
# For example:
#
# array.shape value.shape offset value_offset
# ------------ ------------ ------ ------------
# (3, 4) (3, 4) 0 0
# (1, 1, 3, 4) (3, 4) 2 0
# (3, 4) (1, 1, 3, 4) 0 2
# ------------ ------------ ------ ------------
#
# array_common_shape: The shape of those dimensions of array
# which correspond to dimensions of the
# assignment value.
#
# value_common_shape: The shape of those dimensions of the
# assignment value which correspond to
# dimensions of the array.
#
# base_value_indices: The indices used for initialising the
# selection of the part of the assignment
# value that applies to each block of
# array. An element of `None` will end up
# being replaced by an appropriate slice on a
# block-by-block basis.
#
# non_broadcast_dimensions: The integer positions of
# array_common_shape which do not
# correspond to broadcast dimensions in
# the assignment value.
#
# Note that array_common_shape and value_common_shape may be
    # different if there are any size 1 dimensions being broadcast.
offset = len(indices_shape) - value_ndim
if offset >= 0:
# The array has the same number or more dimensions than the
# assignment value
array_common_shape = indices_shape[offset:]
value_common_shape = value_shape
value_offset = 0
reverse = [i - offset for i in reverse if i >= offset]
else:
        # The assignment value has more dimensions than the array
value_offset = -offset
array_common_shape = indices_shape
value_common_shape = value_shape[value_offset:]
offset = 0
# All of the extra leading dimensions must have size 1
if value_shape[:value_offset] != (1,) * value_offset:
raise ValueError(
"could not broadcast input array from shape"
f"{value_shape} into shape {tuple(indices_shape)}"
)
base_value_indices = []
non_broadcast_dimensions = []
for i, (a, b) in enumerate(zip(array_common_shape, value_common_shape)):
if b == 1:
base_value_indices.append(slice(None))
elif a == b:
base_value_indices.append(None)
non_broadcast_dimensions.append(i)
elif math.isnan(a):
base_value_indices.append(None)
non_broadcast_dimensions.append(i)
else:
# Can't check ...
raise ValueError(
f"Can't broadcast data with shape {value_common_shape} "
f"across shape {tuple(indices_shape)}"
)
# Translate chunks tuple to a set of array locations in product
# order
chunks = array.chunks
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
array_locations = [
[(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
array_locations = product(*array_locations)
# Get the dask keys of the most recent layer in the same order as
# the array locations.
in_keys = list(flatten(array.__dask_keys__()))
# Create a new "setitem" dask entry for each block in the array
dsk = {}
out_name = (out_name,)
for in_key, locations in zip(in_keys, array_locations):
# Now loop round each block dimension.
#
# If the block overlaps the indices then set the following
# (which will be used to define a new dask entry):
#
# block_indices: The indices that will be used to assign to
# this block.
#
# block_indices_shape: The shape implied by block_indices.
#
# block_preceeding_sizes: How many assigned elements precede
# this block along each dimension that
# doesn't have an integer. It is
# assumed that a slice will have a
# positive step, as will be the case
# for reformatted indices. `None` is
# used for dimensions with 1-d integer
# arrays.
block_indices = []
block_indices_shape = []
block_preceeding_sizes = []
local_offset = offset
# Assume, until demonstrated otherwise, that this block
# overlaps the assignment indices.
overlaps = True
# Note which dimension, if any, has 1-d integer array index
dim_1d_int_index = None
for dim, (index, full_size, (loc0, loc1)) in enumerate(
zip(
indices,
array_shape,
locations,
)
):
integer_index = isinstance(index, int)
if isinstance(index, slice):
# Index is a slice
stop = loc1 - loc0
if index.stop < loc1:
stop -= loc1 - index.stop
start = index.start - loc0
if start < 0:
# Make start positive
start %= index.step
if start >= stop:
# This block does not overlap the slice index
overlaps = False
break
step = index.step
block_index = slice(start, stop, step)
block_index_size, rem = divmod(stop - start, step)
if rem:
block_index_size += 1
pre = index.indices(loc0)
n_preceeding, rem = divmod(pre[1] - pre[0], step)
if rem:
n_preceeding += 1
elif integer_index:
# Index is an integer
local_offset += 1
if not loc0 <= index < loc1:
# This block does not overlap the integer index
overlaps = False
break
block_index = index - loc0
else:
# Index is a 1-d array
is_bool = index.dtype == bool
block_index = block_index_from_1d_index(dim, loc0, loc1, is_bool)
if is_bool:
block_index_size = block_index_shape_from_1d_bool_index(
dim, loc0, loc1
)
n_preceeding = n_preceeding_from_1d_bool_index(dim, loc0)
else:
block_index_size = None
n_preceeding = None
dim_1d_int_index = dim
loc0_loc1 = loc0, loc1
if not is_dask_collection(index) and not block_index.size:
# This block does not overlap the 1-d numpy array
# index
overlaps = False
break
# Note: When the 1-d array index is a dask array then
# we can't tell if this block overlaps it, so we
# assume that it does. If it in fact doesn't
# overlap then the part of the assignment value
            #       that corresponds to this block will have zero
# size which, at compute time, will indicate to
# the `setitem` function to pass the block
# through unchanged.
# Still here? This block overlaps the index for this
# dimension.
block_indices.append(block_index)
if not integer_index:
block_indices_shape.append(block_index_size)
block_preceeding_sizes.append(n_preceeding)
# The new dask key
out_key = out_name + in_key[1:]
if not overlaps:
# This block does not overlap the indices for all
# dimensions, so pass the block through unchanged.
dsk[out_key] = in_key
continue
# Still here? Then this block overlaps the indices for all
# dimensions and so needs to have some of its elements
# assigned.
# Initialise the indices of the assignment value that define
# the parts of it which are to be assigned to this block
value_indices = base_value_indices[:]
for i in non_broadcast_dimensions:
j = i + offset
if j == dim_1d_int_index:
# Index is a 1-d integer array
#
# Define index in the current namespace for use in
# `value_indices_from_1d_int_index`
index = indices[j]
value_indices[i] = value_indices_from_1d_int_index(
dim_1d_int_index,
value_shape[i + value_offset],
*loc0_loc1,
)
else:
# Index is a slice or 1-d boolean array
start = block_preceeding_sizes[j]
value_indices[i] = slice(start, start + block_indices_shape[j])
# If required as a consequence of reformatting any slice
# objects of the original indices to have a positive steps,
# reverse the indices to assignment value.
for i in reverse:
size = value_common_shape[i]
start, stop, step = value_indices[i].indices(size)
size -= 1
start = size - start
stop = size - stop
if stop < 0:
stop = None
value_indices[i] = slice(start, stop, -1)
if value_ndim > len(indices):
# The assignment value has more dimensions than array, so
# add a leading Ellipsis to the indices of value.
value_indices.insert(0, Ellipsis)
# Create the part of the full assignment value that is to be
# assigned to elements of this block and make sure that it has
# just one chunk (so we can represent it with a single key in
# the argument list of setitem).
v = value[tuple(value_indices)]
v = concatenate_array_chunks(v)
v_key = next(flatten(v.__dask_keys__()))
# Insert into the output dask dictionary the dask of the part
# of assignment value for this block (not minding when we
# overwrite any existing keys as the values will be the same).
dsk = merge(dict(v.dask), dsk)
# Define the assignment function for this block.
dsk[out_key] = (setitem, in_key, v_key, block_indices)
block_index_from_1d_index.cache_clear()
block_index_shape_from_1d_bool_index.cache_clear()
n_preceeding_from_1d_bool_index.cache_clear()
value_indices_from_1d_int_index.cache_clear()
return dsk
|
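The helper functions inside setitem_array cache per-block results with functools.lru_cache, keying on the dimension position and block bounds because the index array itself is not hashable; the index is read from the enclosing scope. A small standalone sketch of that pattern with a toy integer index:

import functools
import numpy as np

index = np.array([1, 4, 7, 9])  # toy integer index, assumed already non-negative

@functools.lru_cache()
def block_index(dim, loc0, loc1):
    # dim is only a hashable proxy for the (unhashable) index array above.
    i = np.where((loc0 <= index) & (index < loc1))[0]
    return index[i] - loc0

print(block_index(0, 0, 5))   # [1 4]
print(block_index(0, 5, 10))  # [2 4]
block_index.cache_clear()     # clear the cache, as setitem_array does before returning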
21,958 |
def seq_closurePhase(SLC_list, date12_list_all, ifgram_stack, ref_phase, n, box):
"""
Input parameters:
SLC_list : list of SLC dates
date12_list_all: date12 of all the interferograms stored in the ifgramstack file
ifgram_stack: stack file
      ref_phase : reference phase
n : connection level of the closure phase
box : bounding box for the patch
Output: cp_w : stack of wrapped sequential closure phases of connection n
"""
cp_idx = []
NSLC = len(SLC_list)
for i in range(NSLC-n):
ifgram = []
flag = True
for j in range(n):
ifgram.append('{}_{}'.format(SLC_list[i+j],SLC_list[i+j+1]))
ifgram.append('{}_{}'.format(SLC_list[i],SLC_list[i+n]))
for ifgram_name in ifgram:
if ifgram_name not in date12_list_all:
flag = False # if missing an interferogram, we won't make the corresponding closure phase
if flag:
cp_idx.append([date12_list_all.index(ifgram[j]) for j in range(n+1)])
cp_idx = np.array(cp_idx, np.int16)
cp_idx = np.unique(cp_idx, axis = 0)
num_cp = len(cp_idx)
print('Number of closure measurements expected, ', len(SLC_list)-n)
print('Number of closure measurements found, ', num_cp)
if num_cp < len(SLC_list)-n:
print('Missing interferograms, abort')
raise Exception("Some interferograms are missing")
box_width = box[2] - box[0]
box_length = box[3] - box[1]
phase = readfile.read(ifgram_stack, box=box,print_msg=False)[0]
cp_w = np.zeros((num_cp, box_length, box_width), np.float32)
for i in range(num_cp):
cp0_w = np.zeros ((box_length, box_width), np.float32)
for j in range(n):
idx = cp_idx[i,j]
cp0_w = cp0_w + phase[idx,:,:] - ref_phase[idx]
idx = cp_idx[i,n]
cp0_w = cp0_w - (phase[idx,:,:]-ref_phase[idx])
cp_w[i,:,:] = np.angle(np.exp(1j*cp0_w))
return cp_w
|
def seq_closurePhase(SLC_list, date12_list_all, ifgram_stack, ref_phase, n, box):
"""
Input parameters:
SLC_list : list of SLC dates
date12_list_all: date12 of all the interferograms stored in the ifgramstack file
ifgram_stack: stack file
      ref_phase : reference phase
n : connection level of the closure phase
box : bounding box for the patch
Output: cp_w : stack of wrapped sequential closure phases of connection n
"""
cp_idx = []
NSLC = len(SLC_list)
for i in range(NSLC-n):
ifgram = []
flag = True
for j in range(n):
ifgram.append('{}_{}'.format(SLC_list[i+j],SLC_list[i+j+1]))
ifgram.append('{}_{}'.format(SLC_list[i],SLC_list[i+n]))
for ifgram_name in ifgram:
if ifgram_name not in date12_list_all:
flag = False # if missing an interferogram, we won't make the corresponding closure phase
if flag:
cp_idx.append([date12_list_all.index(ifgram[j]) for j in range(n+1)])
cp_idx = np.array(cp_idx, np.int16)
cp_idx = np.unique(cp_idx, axis = 0)
num_cp = len(cp_idx)
print('Number of closure measurements expected, ', len(SLC_list)-n)
print('Number of closure measurements found, ', num_cp)
    if num_cp < NSLC-n:
print('Missing interferograms, abort')
raise Exception("Some interferograms are missing")
box_width = box[2] - box[0]
box_length = box[3] - box[1]
phase = readfile.read(ifgram_stack, box=box,print_msg=False)[0]
cp_w = np.zeros((num_cp, box_length, box_width), np.float32)
for i in range(num_cp):
cp0_w = np.zeros ((box_length, box_width), np.float32)
for j in range(n):
idx = cp_idx[i,j]
cp0_w = cp0_w + phase[idx,:,:] - ref_phase[idx]
idx = cp_idx[i,n]
cp0_w = cp0_w - (phase[idx,:,:]-ref_phase[idx])
cp_w[i,:,:] = np.angle(np.exp(1j*cp0_w))
return cp_w
|
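A small worked example of the loop construction used by seq_closurePhase above; the dates and phase values are made up purely for illustration.
import numpy as np

SLC_list = ['20200101', '20200113', '20200125', '20200206']
n = 2
loops = []
for i in range(len(SLC_list) - n):
    # n sequential interferograms ...
    loop = ['{}_{}'.format(SLC_list[i + j], SLC_list[i + j + 1]) for j in range(n)]
    # ... closed by the single spanning interferogram
    loop.append('{}_{}'.format(SLC_list[i], SLC_list[i + n]))
    loops.append(loop)
# loops == [['20200101_20200113', '20200113_20200125', '20200101_20200125'],
#           ['20200113_20200125', '20200125_20200206', '20200113_20200206']]

# The wrapped closure phase of one loop is the sum of the sequential phases
# minus the spanning phase, wrapped back into (-pi, pi]:
phi = np.array([0.4, 0.3, 0.65])
cp = np.angle(np.exp(1j * (phi[0] + phi[1] - phi[2])))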
39,872 |
def test_coexisting_configurations(click_runner,
custom_filepath,
mock_primary_registry_filepath,
testerchain):
# Parse node addresses
deployer, alice, ursula, another_ursula, *all_yall = testerchain.interface.w3.eth.accounts
envvars = {'NUCYPHER_KEYRING_PASSWORD': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_MINER_ESCROW_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_POLICY_MANAGER_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_USER_ESCROW_PROXY_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_FELIX_DB_SECRET': INSECURE_DEVELOPMENT_PASSWORD}
# Future configuration filepaths for assertions...
public_keys_dir = os.path.join(custom_filepath, 'keyring', 'public')
known_nodes_dir = os.path.join(custom_filepath, 'known_nodes')
# ... Ensure they do not exist to begin with.
assert not os.path.isdir(public_keys_dir)
assert not os.path.isfile(known_nodes_dir)
# Deploy contracts
deploy_args = ('contracts',
'--registry-outfile', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--deployer-address', deployer,
'--config-root', custom_filepath,
'--poa')
result = click_runner.invoke(deploy.deploy, deploy_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
# No keys have been generated...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(public_keys_dir)) == 0
# No known nodes exist...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(known_nodes_dir)) == 0
# Just the configuration root...
assert os.path.isdir(custom_filepath)
# and the fresh registry.
assert os.path.isfile(mock_primary_registry_filepath)
#
# Create
#
# Expected config files
felix_file_location = os.path.join(custom_filepath, 'felix.config')
alice_file_location = os.path.join(custom_filepath, 'alice.config')
ursula_file_location = os.path.join(custom_filepath, 'ursula.config')
another_ursula_configuration_file_location = os.path.join(custom_filepath, f'ursula-{another_ursula[:6]}.config')
# Felix creates a system configuration
felix_init_args = ('felix', 'init',
'--config-root', custom_filepath,
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', deployer,
'--registry-filepath', mock_primary_registry_filepath
)
result = click_runner.invoke(nucypher_cli, felix_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(felix_file_location)
assert len(os.listdir(public_keys_dir)) == 3
    # Use a custom local filepath to init a persistent Alice
alice_init_args = ('alice', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', alice,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, alice_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(alice_file_location)
assert len(os.listdir(public_keys_dir)) == 5
# Use the same local filepath to init an persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', ursula,
'--rest-host', MOCK_IP_ADDRESS,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert os.path.isfile(ursula_file_location)
# Use the same local filepath to init another persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--checksum-address', another_ursula,
'--rest-host', MOCK_IP_ADDRESS_2,
'--registry-filepath', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(ursula_file_location)
assert os.path.isfile(another_ursula_configuration_file_location)
assert len(os.listdir(public_keys_dir)) == 11
#
# Destroy
#
another_ursula_destruction_args = ('ursula', 'destroy', '--force',
'--config-file', another_ursula_configuration_file_location)
result = click_runner.invoke(nucypher_cli, another_ursula_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert not os.path.isfile(another_ursula_configuration_file_location)
ursula_destruction_args = ('ursula', 'destroy', '--config-file', ursula_file_location)
result = click_runner.invoke(nucypher_cli, ursula_destruction_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert 'y/N' in result.output
assert len(os.listdir(public_keys_dir)) == 5
assert not os.path.isfile(ursula_file_location)
felix_destruction_args = ('alice', 'destroy', '--force', '--config-file', alice_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 3
assert not os.path.isfile(alice_file_location)
felix_destruction_args = ('felix', 'destroy', '--force', '--config-file', felix_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 0
assert not os.path.isfile(felix_file_location)
|
def test_coexisting_configurations(click_runner,
custom_filepath,
mock_primary_registry_filepath,
testerchain):
# Parse node addresses
deployer, alice, ursula, another_ursula, *all_yall = testerchain.interface.w3.eth.accounts
envvars = {'NUCYPHER_KEYRING_PASSWORD': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_MINER_ESCROW_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_POLICY_MANAGER_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_USER_ESCROW_PROXY_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_FELIX_DB_SECRET': INSECURE_DEVELOPMENT_PASSWORD}
# Future configuration filepaths for assertions...
public_keys_dir = os.path.join(custom_filepath, 'keyring', 'public')
known_nodes_dir = os.path.join(custom_filepath, 'known_nodes')
# ... Ensure they do not exist to begin with.
assert not os.path.isdir(public_keys_dir)
assert not os.path.isfile(known_nodes_dir)
# Deploy contracts
deploy_args = ('contracts',
'--registry-outfile', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--deployer-address', deployer,
'--config-root', custom_filepath,
'--poa')
result = click_runner.invoke(deploy.deploy, deploy_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
# No keys have been generated...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(public_keys_dir)) == 0
# No known nodes exist...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(known_nodes_dir)) == 0
# Just the configuration root...
assert os.path.isdir(custom_filepath)
# and the fresh registry.
assert os.path.isfile(mock_primary_registry_filepath)
#
# Create
#
# Expected config files
felix_file_location = os.path.join(custom_filepath, 'felix.config')
alice_file_location = os.path.join(custom_filepath, 'alice.config')
ursula_file_location = os.path.join(custom_filepath, 'ursula.config')
another_ursula_configuration_file_location = os.path.join(custom_filepath, f'ursula-{another_ursula[:6]}.config')
# Felix creates a system configuration
felix_init_args = ('felix', 'init',
'--config-root', custom_filepath,
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', deployer,
'--registry-filepath', mock_primary_registry_filepath
)
result = click_runner.invoke(nucypher_cli, felix_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(felix_file_location)
assert len(os.listdir(public_keys_dir)) == 3
    # Use a custom local filepath to init a persistent Alice
alice_init_args = ('alice', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', alice,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, alice_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(alice_file_location)
assert len(os.listdir(public_keys_dir)) == 5
    # Use the same local filepath to init a persistent Ursula
    init_args = ('ursula', 'init',
                 '--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', ursula,
'--rest-host', MOCK_IP_ADDRESS,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert os.path.isfile(ursula_file_location)
# Use the same local filepath to init another persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--checksum-address', another_ursula,
'--rest-host', MOCK_IP_ADDRESS_2,
'--registry-filepath', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(ursula_file_location)
assert os.path.isfile(another_ursula_configuration_file_location)
assert len(os.listdir(public_keys_dir)) == 11
#
# Destroy
#
another_ursula_destruction_args = ('ursula', 'destroy', '--force',
'--config-file', another_ursula_configuration_file_location)
result = click_runner.invoke(nucypher_cli, another_ursula_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert not os.path.isfile(another_ursula_configuration_file_location)
ursula_destruction_args = ('ursula', 'destroy', '--config-file', ursula_file_location)
result = click_runner.invoke(nucypher_cli, ursula_destruction_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert 'y/N' in result.output
assert len(os.listdir(public_keys_dir)) == 5
assert not os.path.isfile(ursula_file_location)
felix_destruction_args = ('alice', 'destroy', '--force', '--config-file', alice_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 3
assert not os.path.isfile(alice_file_location)
felix_destruction_args = ('felix', 'destroy', '--force', '--config-file', felix_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 0
assert not os.path.isfile(felix_file_location)
|
4,245 |
def link_brains(brains, time=True, camera=False):
"""Plot multiple SourceEstimate objects with PyVista.
Parameters
----------
brains : list, tuple or np.ndarray
The collection of brains to plot.
time : bool
If True, link the time controllers. Defaults to True.
camera : bool
If True, link the camera controls. Defaults to True.
"""
from .backends.renderer import _get_3d_backend
if _get_3d_backend() != 'pyvista':
raise NotImplementedError("Expected 3d backend is pyvista but"
" {} was given.".format(_get_3d_backend()))
from ._brain import _Brain, _TimeViewer, _LinkViewer
if not isinstance(brains, Iterable):
brains = [brains]
if len(brains) == 0:
raise ValueError("The collection of brains is empty.")
for brain in brains:
if isinstance(brain, _Brain):
# check if the _TimeViewer wrapping is not already applied
if not hasattr(brain, 'time_viewer') or brain.time_viewer is None:
brain = _TimeViewer(brain)
else:
raise TypeError("Expected type is Brain but"
" {} was given.".format(type(brain)))
# link brains properties
_LinkViewer(brains, time, camera)
|
def link_brains(brains, time=True, camera=False):
"""Plot multiple SourceEstimate objects with PyVista.
Parameters
----------
brains : list, tuple or np.ndarray
The collection of brains to plot.
time : bool
If True, link the time controllers. Defaults to True.
camera : bool
If True, link the camera controls. Defaults to False.
"""
from .backends.renderer import _get_3d_backend
if _get_3d_backend() != 'pyvista':
raise NotImplementedError("Expected 3d backend is pyvista but"
" {} was given.".format(_get_3d_backend()))
from ._brain import _Brain, _TimeViewer, _LinkViewer
if not isinstance(brains, Iterable):
brains = [brains]
if len(brains) == 0:
raise ValueError("The collection of brains is empty.")
for brain in brains:
if isinstance(brain, _Brain):
# check if the _TimeViewer wrapping is not already applied
if not hasattr(brain, 'time_viewer') or brain.time_viewer is None:
brain = _TimeViewer(brain)
else:
raise TypeError("Expected type is Brain but"
" {} was given.".format(type(brain)))
# link brains properties
_LinkViewer(brains, time, camera)
|
3,746 |
def deprecate_with_doc(msg):
"""
Returns new _Deprecate class object.
The object can be used to Issue a DeprecationWarning, by passing `func`
as arguement,this adds warning to `old_name`'s docstring, rebinds
``old_name.__name__`` and returns the new function object.
This function may also be used as a decorator.
See Also
--------
deprecate`
Parameters
----------
message : str
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
_Deprecate object : object
The _Deprecate class object.
Examples
--------
Note that ``olduint`` returns a value after printing DeprecationWarning
with msg:
>>>oldobj = np.deprecate_with_doc("Use np.int_ instead.")
>>>olduint = oldobj(np.uint)
>>>DeprecationWarning: `uint64` is deprecated! #may vary
... Use np.int_ instead.
>>>olduint(6)
>>>6
"""
return _Deprecate(message=msg)
|
def deprecate_with_doc(msg):
"""
Returns new _Deprecate class object.
The object can be used to Issue a DeprecationWarning, by passing `func`
as argument, this adds warning to `old_name`'s docstring, rebinds
``old_name.__name__`` and returns the new function object.
This function may also be used as a decorator.
See Also
--------
deprecate`
Parameters
----------
message : str
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
_Deprecate object : object
The _Deprecate class object.
Examples
--------
Note that ``olduint`` returns a value after printing DeprecationWarning
with msg:
>>>oldobj = np.deprecate_with_doc("Use np.int_ instead.")
>>>olduint = oldobj(np.uint)
>>>DeprecationWarning: `uint64` is deprecated! #may vary
... Use np.int_ instead.
>>>olduint(6)
>>>6
"""
return _Deprecate(message=msg)
|
42,755 |
def test_search_assets(rotkehlchen_api_server):
"""Test that searching for assets using a keyword works."""
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'Bitcoin',
'search_column': 'name',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) <= 50
for entry in result:
assert 'bitcoin' in entry['name'].lower()
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# use a different keyword
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'eth',
'search_column': 'symbol',
'limit': 10,
'order_by_attributes': ['symbol'],
'ascending': [False],
},
)
result = assert_proper_response_with_result(response)
assert len(result) <= 10
for entry in result:
assert 'eth' in entry['symbol'].lower()
assert_asset_result_order(data=result, is_ascending=False, order_field='symbol')
# check that searching for a non-existent asset returns nothing
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'idontexist',
'search_column': 'name',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 0
# use the return_exact_matches flag
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'ETH',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [False],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 3
assert any(['Ethereum' == entry['name'] for entry in result])
for entry in result:
assert entry['symbol'] == 'ETH'
assert_asset_result_order(data=result, is_ascending=False, order_field='name')
    # check that `treat_eth2_as_eth` setting is respected
# using the test above.
db = rotkehlchen_api_server.rest_api.rotkehlchen.data.db
with db.user_write() as cursor:
db.set_settings(cursor, ModifiableDBSettings(treat_eth2_as_eth=True))
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'ETH',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 2
assert any(['Ethereum' == entry['name'] for entry in result])
for entry in result:
assert entry['symbol'] == 'ETH'
assert entry['identifier'] != 'ETH2'
if entry['name'] != 'Binance-Peg Ethereum Token':
assert 'chain' not in entry
else:
assert entry['chain'] == 'binance'
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# search using a column that is not allowed
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'idontexist',
'search_column': 'identifier',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
assert_error_response(response, contained_in_msg='Must be one of: name, symbol.')
# test that the chain column is included
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'DAI',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert {asset['chain'] for asset in result} == {'matic', 'optimism', 'ethereum', 'arbitrum', 'binance'} # noqa: E501
# check that using chain filter works.
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'DAI',
'search_column': 'symbol',
'limit': 50,
'chain': 'ethereum',
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert 50 >= len(result) > 10
assert all(['ethereum' == entry['chain'] for entry in result])
for entry in result:
assert 'DAI' in entry['symbol']
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# check that using an unsupported chain fails
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'dai',
'search_column': 'symbol',
'limit': 50,
'chain': 'near',
'order_by_attributes': ['name'],
'ascending': [True],
},
)
assert_error_response(response, contained_in_msg='Failed to deserialize ChainID value near')
|
def test_search_assets(rotkehlchen_api_server):
"""Test that searching for assets using a keyword works."""
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'Bitcoin',
'search_column': 'name',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) <= 50
for entry in result:
assert 'bitcoin' in entry['name'].lower()
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# use a different keyword
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'eth',
'search_column': 'symbol',
'limit': 10,
'order_by_attributes': ['symbol'],
'ascending': [False],
},
)
result = assert_proper_response_with_result(response)
assert len(result) <= 10
for entry in result:
assert 'eth' in entry['symbol'].lower()
assert_asset_result_order(data=result, is_ascending=False, order_field='symbol')
# check that searching for a non-existent asset returns nothing
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'idontexist',
'search_column': 'name',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 0
# use the return_exact_matches flag
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'ETH',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [False],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 3
assert any(['Ethereum' == entry['name'] for entry in result])
for entry in result:
assert entry['symbol'] == 'ETH'
assert_asset_result_order(data=result, is_ascending=False, order_field='name')
    # check that `treat_eth2_as_eth` setting is respected
# using the test above.
db = rotkehlchen_api_server.rest_api.rotkehlchen.data.db
with db.user_write() as cursor:
db.set_settings(cursor, ModifiableDBSettings(treat_eth2_as_eth=True))
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'ETH',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert len(result) == 2
assert any(['Ethereum' == entry['name'] for entry in result])
for entry in result:
assert entry['symbol'] == 'ETH'
assert entry['identifier'] != 'ETH2'
if entry['name'] != 'Binance-Peg Ethereum Token':
assert 'chain' not in entry
else:
assert entry['chain'] == 'binance'
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# search using a column that is not allowed
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'idontexist',
'search_column': 'identifier',
'limit': 50,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
assert_error_response(response, contained_in_msg='Must be one of: name, symbol.')
# test that the chain column is included
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'DAI',
'search_column': 'symbol',
'limit': 10,
'return_exact_matches': True,
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert {asset['chain'] for asset in result} == {'matic', 'optimism', 'ethereum', 'arbitrum', 'binance'} # noqa: E501
# check that using chain filter works.
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'DAI',
'search_column': 'symbol',
'limit': 50,
'chain': 'ethereum',
'order_by_attributes': ['name'],
'ascending': [True],
},
)
result = assert_proper_response_with_result(response)
assert 50 >= len(result) > 10
assert all(['ethereum' == entry['chain'] and 'DAI' in entry['symbol'] for entry in result])
assert_asset_result_order(data=result, is_ascending=True, order_field='name')
# check that using an unsupported chain fails
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'assetssearchresource',
),
json={
'value': 'dai',
'search_column': 'symbol',
'limit': 50,
'chain': 'near',
'order_by_attributes': ['name'],
'ascending': [True],
},
)
assert_error_response(response, contained_in_msg='Failed to deserialize ChainID value near')
|
30,787 |
def main() -> None:
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Commands dict
commands = {
'pt-ssl-cert-search': ssl_cert_search_command,
'pt-get-pdns-details': get_pdns_details_command,
'pt-whois-search': pt_whois_search_command,
'pt-get-components': get_components_command,
'pt-get-trackers': get_trackers_command,
'pt-get-host-pairs': get_host_pairs_command
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
# Retrieve XSOAR params
base_url = demisto.params().get('url')
username = demisto.params().get('username')
secret = demisto.params().get('secret')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
request_timeout = get_request_timeout()
# prepare client class object
client = Client(base_url=base_url, request_timeout=request_timeout, verify=verify_certificate, proxy=proxy,
auth=(username, secret))
# This is the call made when pressing the integration Test button.
if demisto.command() == 'test-module':
result = test_function(client)
demisto.results(result)
elif command in commands:
return_results(commands[command](client, demisto.args()))
# Log exceptions
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Error: {str(e)}')
|
def main() -> None:
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Commands dict
commands = {
'pt-ssl-cert-search': ssl_cert_search_command,
'pt-get-pdns-details': get_pdns_details_command,
'pt-whois-search': pt_whois_search_command,
'pt-get-components': get_components_command,
'pt-get-trackers': get_trackers_command,
'pt-get-host-pairs': get_host_pairs_command
}
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
# Retrieve XSOAR params
base_url = demisto.params().get('url')
username = demisto.params().get('username')
secret = demisto.params().get('secret')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
request_timeout = get_request_timeout()
# prepare client class object
client = Client(base_url=base_url, request_timeout=request_timeout, verify=verify_certificate, proxy=proxy,
auth=(username, secret))
# This is the call made when pressing the integration Test button.
if demisto.command() == 'test-module':
result = test_function(client)
demisto.results(result)
elif command in commands:
return_results(commands[command](client, demisto.args()))
# Log exceptions
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Error: {str(e)}')
|
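The commands-dict dispatch used above is a common pattern; below is a minimal standalone sketch in which the handler names, command strings, and the client placeholder are all hypothetical.
def handle_foo(client, args):
    # Placeholder handler: a real one would call the client and build results.
    return {'command': 'foo', 'args': args}

def handle_bar(client, args):
    return {'command': 'bar', 'args': args}

COMMANDS = {'example-foo': handle_foo, 'example-bar': handle_bar}

def dispatch(command, client, args):
    # Look the command up once and fail loudly on anything unknown.
    if command not in COMMANDS:
        raise ValueError('Unknown command: {}'.format(command))
    return COMMANDS[command](client, args)

# dispatch('example-foo', None, {'x': 1}) -> {'command': 'foo', 'args': {'x': 1}}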
40,129 |
def dnf_install_packages(*packages: str):
'''
Install packages on Fedora / RedHat / Cent systems.
:param packages: Iterable containing packages to install.
'''
log_current_packages(packages)
return _run_shell_command_raise_on_return_code(f"sudo dnf install -y {' '.join(packages)}", f"Error in installation of package(s) {' '.join(packages)}", True)
|
def dnf_install_packages(*packages: str):
'''
Install packages on Fedora / RedHat / Cent systems.
:param packages: Iterable containing packages to install.
'''
    return _run_shell_command_raise_on_return_code(f"sudo dnf install -y {' '.join(packages)}", f"Error in installation of package(s) {' '.join(packages)}", True)
|
42,082 |
def _get_skipped_trial_numbers(
trials: List[FrozenTrial], used_param_names: Sequence[str]
) -> Set[int]:
"""Utility function for ``plot_parallel_coordinate``.
    If a trial's parameters do not contain a parameter in ``used_param_names``,
    ``plot_parallel_coordinate`` methods do not use such trials.
Args:
trials:
List of ``FrozenTrials``.
used_param_names:
The parameter names used in ``plot_parallel_coordinate``.
Returns:
A list of invalid trial numbers.
"""
skipped_trial_numbers = set()
for trial in trials:
for used_param in used_param_names:
if used_param not in trial.params.keys():
skipped_trial_numbers.add(trial.number)
break
return skipped_trial_numbers
|
def _get_skipped_trial_numbers(
trials: List[FrozenTrial], used_param_names: Sequence[str]
) -> Set[int]:
"""Utility function for ``plot_parallel_coordinate``.
    If a trial's parameters do not contain a parameter in ``used_param_names``,
    ``plot_parallel_coordinate`` methods do not use such trials.
Args:
trials:
List of ``FrozenTrials``.
used_param_names:
The parameter names used in ``plot_parallel_coordinate``.
Returns:
A set of invalid trial numbers.
"""
skipped_trial_numbers = set()
for trial in trials:
for used_param in used_param_names:
if used_param not in trial.params.keys():
skipped_trial_numbers.add(trial.number)
break
return skipped_trial_numbers
|
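To see the helper's behaviour without a full Optuna study, stand-in objects exposing only the two attributes it reads (``number`` and ``params``) are enough; FakeTrial below is purely illustrative and not an Optuna type.
from collections import namedtuple

FakeTrial = namedtuple('FakeTrial', ['number', 'params'])
trials = [
    FakeTrial(0, {'x': 1.0, 'y': 2.0}),
    FakeTrial(1, {'x': 0.5}),   # missing 'y'
    FakeTrial(2, {'y': 3.0}),   # missing 'x'
]
# _get_skipped_trial_numbers(trials, ['x', 'y']) -> {1, 2}
# Trial 0 has every requested parameter, so only trials 1 and 2 are skipped.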
56,590 |
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True
mean: bool
Whether or not to plot the observed data. Defaults to True
color: str
Valid matplotlib color. Defaults to C0
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by `~` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and
        `animation = False` it defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter: float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated: bool
Create an animation of one posterior/prior predictive sample per frame. Defaults to False.
        Only works with matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
        Keywords passed to `animation.FuncAnimation`. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default True.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=groups)
)
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
color=color,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True
observed: bool, default True
Whether or not to plot the observed data.
color: str
Valid matplotlib color. Defaults to C0
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by `~` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and
        `animation = False` it defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter: float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated: bool
Create an animation of one posterior/prior predictive sample per frame. Defaults to False.
        Only works with matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
        Keywords passed to `animation.FuncAnimation`. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default True.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=groups)
)
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
color=color,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
20,483 |
def compare_xml_sets(old_records, new_records):
reprs = collections.defaultdict(list)
def match_updates(match_fields):
old_updated, new_updated = {}, {}
for column in copy.copy(old_records):
found_all = search(column, old_records, match_fields, True)
for found in found_all:
old_records.remove(found)
found['old'] = True
found['upd'] = True
found['domain'] = False
found['noupdate_switched'] = False
if found['name'] not in old_updated:
old_updated[found['name']] = []
old_updated[found['name']] += [found]
for column in copy.copy(new_records):
found_all = search(column, new_records, match_fields, True)
for found in found_all:
new_records.remove(found)
found['new'] = True
found['upd'] = True
found['domain'] = False
found['noupdate_switched'] = False
if found['name'] not in new_updated:
new_updated[found['name']] = []
new_updated[found['name']] += [found]
for old_name, old_found in old_updated.items():
for new_name, new_found in new_updated.items():
if old_name == new_name:
old_modules = [x['module'] for x in old_found]
new_modules = [x['module'] for x in new_found]
old_modules_str = ','.join([x for x in old_modules])
new_modules_str = ','.join([x for x in new_modules])
for found in old_found:
found['moved'] = new_modules_str
found['potent'] = True
for found in new_found:
found['moved'] = old_modules_str
found['potent'] = True
matched_records = list(old_updated.values()) + list(
new_updated.values())
matched_records = [y for x in matched_records for y in x]
return matched_records
def match(match_fields, match_type='direct'):
matched_records = []
for column in copy.copy(old_records):
found = search(column, new_records, match_fields)
if found:
old_records.remove(column)
new_records.remove(found)
if match_type != 'direct':
column['old'] = True
found['new'] = True
column[match_type] = found['module']
found[match_type] = column['module']
found['domain'] = column['domain'] != found['domain'] and \
column['domain'] != '[]' and found['domain'] is False
column['domain'] = False
column['noupdate_switched'] = False
found['noupdate_switched'] = \
column['noupdate'] != found['noupdate']
if match_type != 'direct':
matched_records.append(column)
matched_records.append(found)
elif (match_type == 'direct' and found['domain']) or \
found['noupdate_switched']:
matched_records.append(found)
return matched_records
# direct match
modified_records = match(['module', 'model', 'name'])
# unmatched updated records
updated_records = match_updates(['model', 'name'])
# other module, same full xmlid
moved_records = match(['model', 'name'], 'moved')
# other module, same suffix, other prefix
renamed_records = match(['model', 'suffix', 'other_prefix'], 'renamed')
for record in old_records:
record['old'] = True
record['domain'] = False
record['noupdate_switched'] = False
for record in new_records:
record['new'] = True
record['domain'] = False
record['noupdate_switched'] = False
sorted_records = sorted(
old_records + new_records + moved_records + renamed_records +
modified_records + updated_records,
key=lambda k: (k['model'], 'old' in k, k['name'])
)
for entry in sorted_records:
content = ''
if 'old' in entry:
label = 'DEL'
if 'upd' in entry:
label += '(UPD)'
entry['label'] = label
content = '%(label)s %(model)s: %(name)s' % entry
if 'moved' in entry:
potential = ''
if 'potent' in entry:
potential = 'potentially '
entry['potent'] = potential
content += ' [%(potent)smoved to %(moved)s module]' % entry
elif 'renamed' in entry:
content += ' [renamed to %(renamed)s module]' % entry
elif 'new' in entry:
label = 'NEW'
if 'upd' in entry:
label += '(UPD)'
entry['label'] = label
content = '%(label)s %(model)s: %(name)s' % entry
if 'moved' in entry:
potential = ''
if 'potent' in entry:
potential = 'potentially '
entry['potent'] = potential
content += ' [%(potent)smoved from %(moved)s module]' % entry
elif 'renamed' in entry:
content += ' [renamed from %(renamed)s module]' % entry
if 'old' not in entry and 'new' not in entry:
content = '%(model)s: %(name)s' % entry
if entry['domain']:
content += ' (deleted domain)'
if entry['noupdate']:
content += ' (noupdate)'
if entry['noupdate_switched']:
content += ' (noupdate switched)'
reprs[module_map(entry['module'])].append(content)
return reprs
|
def compare_xml_sets(old_records, new_records):
reprs = collections.defaultdict(list)
def match_updates(match_fields):
old_updated, new_updated = {}, {}
for column in copy.copy(old_records):
found_all = search(column, old_records, match_fields, True)
for found in found_all:
old_records.remove(found)
found['old'] = True
found['upd'] = True
found['domain'] = False
found['noupdate_switched'] = False
if found['name'] not in old_updated:
old_updated[found['name']] = []
old_updated[found['name']] += [found]
for column in copy.copy(new_records):
found_all = search(column, new_records, match_fields, True)
for found in found_all:
new_records.remove(found)
found['new'] = True
found['upd'] = True
found['domain'] = False
found['noupdate_switched'] = False
if found['name'] not in new_updated:
new_updated[found['name']] = []
new_updated[found['name']] += [found]
for old_name, old_found in old_updated.items():
for new_name, new_found in new_updated.items():
if old_name == new_name:
old_modules = [x['module'] for x in old_found]
new_modules = [x['module'] for x in new_found]
old_modules_str = ','.join([x for x in old_modules])
new_modules_str = ','.join([x for x in new_modules])
for found in old_found:
found['moved'] = new_modules_str
found['potent'] = True
for found in new_found:
found['moved'] = old_modules_str
found['potent'] = True
matched_records = list(old_updated.values()) + list(
new_updated.values())
matched_records = [y for x in matched_records for y in x]
return matched_records
def match(match_fields, match_type='direct'):
matched_records = []
for column in copy.copy(old_records):
found = search(column, new_records, match_fields)
if found:
old_records.remove(column)
new_records.remove(found)
if match_type != 'direct':
column['old'] = True
found['new'] = True
column[match_type] = found['module']
found[match_type] = column['module']
found['domain'] = column['domain'] != found['domain'] and \
column['domain'] != '[]' and found['domain'] is False
column['domain'] = False
column['noupdate_switched'] = False
found['noupdate_switched'] = \
column['noupdate'] != found['noupdate']
if match_type != 'direct':
matched_records.append(column)
matched_records.append(found)
elif (match_type == 'direct' and found['domain']) or \
found['noupdate_switched']:
matched_records.append(found)
return matched_records
# direct match
modified_records = match(['module', 'model', 'name'])
# unmatched updated records
updated_records = match_updates(['model', 'name'])
# other module, same full xmlid
moved_records = match(['model', 'name'], 'moved')
# other module, same suffix, other prefix
renamed_records = match(['model', 'suffix', 'other_prefix'], 'renamed')
for record in old_records:
record['old'] = True
record['domain'] = False
record['noupdate_switched'] = False
for record in new_records:
record['new'] = True
record['domain'] = False
record['noupdate_switched'] = False
sorted_records = sorted(
old_records + new_records + moved_records + renamed_records +
modified_records + updated_records,
key=lambda k: (k['model'], 'old' in k, k['name'])
)
for entry in sorted_records:
content = ''
if 'old' in entry:
label = 'DEL'
if 'upd' in entry:
label += '(UPD)'
entry['label'] = label
content = '%(label)s %(model)s: %(name)s' % entry
if 'moved' in entry:
potential = ''
if 'potent' in entry:
potential = 'potentially '
entry['potent'] = potential
content += ' [%(potent)smoved to %(moved)s module]' % entry
elif 'renamed' in entry:
content += ' [renamed to %(renamed)s module]' % entry
elif 'new' in entry:
label = 'NEW'
if 'upd' in entry:
label = 'UPD'
entry['label'] = label
content = '%(label)s %(model)s: %(name)s' % entry
if 'moved' in entry:
potential = ''
if 'potent' in entry:
potential = 'potentially '
entry['potent'] = potential
content += ' [%(potent)smoved from %(moved)s module]' % entry
elif 'renamed' in entry:
content += ' [renamed from %(renamed)s module]' % entry
if 'old' not in entry and 'new' not in entry:
content = '%(model)s: %(name)s' % entry
if entry['domain']:
content += ' (deleted domain)'
if entry['noupdate']:
content += ' (noupdate)'
if entry['noupdate_switched']:
content += ' (noupdate switched)'
reprs[module_map(entry['module'])].append(content)
return reprs
|
34,222 |
def create_dir(dir_path: Text) -> None:
"""Creates a directory and its super paths.
Succeeds even if the path already exists."""
try:
os.makedirs(dir_path)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
|
def create_directory(directory_path: Text) -> None:
"""Creates a directory and its super paths.
Succeeds even if the path already exists."""
try:
os.makedirs(dir_path)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
|
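An aside on the create_dir pair above: since Python 3.2 the standard library can express the same "succeed if it already exists" behaviour directly, so a near-equivalent sketch (not part of the dataset row) is:
import os
def create_dir_alternative(dir_path: str) -> None:
    # Near-equivalent to the snippet above: create the directory tree and
    # succeed silently if it already exists (exist_ok is stdlib, Python 3.2+).
    os.makedirs(dir_path, exist_ok=True)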
44,243 |
def test_intialization(self):
"""Test initialization for a SymbolicOp"""
base = TempOperator("a")
op = SymbolicOp(base, id="something")
assert op.base is base
assert op.hyperparameters["base"] is base
assert op.id == "something"
assert op.queue_idx is None
assert op.name == "Symbolic"
|
def test_intialization():
"""Test initialization for a SymbolicOp"""
base = TempOperator("a")
op = SymbolicOp(base, id="something")
assert op.base is base
assert op.hyperparameters["base"] is base
assert op.id == "something"
assert op.queue_idx is None
assert op.name == "Symbolic"
|
35,188 |
def _parse_gufunc_signature(signature):
    # The code has been modified from dask to support optional dimensions
if type(signature) != str:
raise TypeError('Signature is not a string')
if signature == '' or signature is None:
raise ValueError('Signature cannot be empty')
signature = signature.replace(' ', '')
if not re.match(_SIGNATURE, signature):
raise ValueError('Not a valid gufunc signature: {}'.format(signature))
in_txt, out_txt = signature.split('->')
ins = [tuple(x.split(',')) if x != '' else ()
for x in in_txt[1:-1].split('),(')]
outs = [tuple(y.split(',')) if y != '' else ()
for y in out_txt[1:-1].split('),(')]
# TODO(ecastill) multiple output support
if len(outs) > 1:
raise ValueError('Currently more than 1 output is not supported')
outs = outs[0] if ((len(outs) == 1) and (out_txt[-1] != ',')) else outs
return ins, outs
|
def _parse_gufunc_signature(signature):
    # The code has been modified from dask to support optional dimensions
if not isinstance(signature, str):
raise TypeError('Signature is not a string')
if signature == '' or signature is None:
raise ValueError('Signature cannot be empty')
signature = signature.replace(' ', '')
if not re.match(_SIGNATURE, signature):
raise ValueError('Not a valid gufunc signature: {}'.format(signature))
in_txt, out_txt = signature.split('->')
ins = [tuple(x.split(',')) if x != '' else ()
for x in in_txt[1:-1].split('),(')]
outs = [tuple(y.split(',')) if y != '' else ()
for y in out_txt[1:-1].split('),(')]
# TODO(ecastill) multiple output support
if len(outs) > 1:
raise ValueError('Currently more than 1 output is not supported')
outs = outs[0] if ((len(outs) == 1) and (out_txt[-1] != ',')) else outs
return ins, outs
|
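To make the gufunc signature format concrete, here is the splitting logic from the pair above applied to a matrix-multiply signature; this sketch deliberately skips the _SIGNATURE regex validation, which is defined elsewhere in that module:
signature = '(i,j),(j,k)->(i,k)'.replace(' ', '')
in_txt, out_txt = signature.split('->')
ins = [tuple(x.split(',')) if x != '' else () for x in in_txt[1:-1].split('),(')]
outs = [tuple(y.split(',')) if y != '' else () for y in out_txt[1:-1].split('),(')]
print(ins)      # [('i', 'j'), ('j', 'k')]
print(outs[0])  # ('i', 'k') -- a single output tuple is unwrapped by the function above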
4,402 |
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif not (len(items) == 4 or len(items) == 5):
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
if len(items) == 5:
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
elif len(items) == 4:
label = items[0]
if label == 'Centroid':
continue # centroid
pos = np.array([float(item) for item in items[1:]])
if label == 'Nasion':
nasion = pos
elif label == 'Left':
lpa = pos
elif label == 'Right':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
|
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif len(items) not in (4, 5):
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
if len(items) == 5:
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
elif len(items) == 4:
label = items[0]
if label == 'Centroid':
continue # centroid
pos = np.array([float(item) for item in items[1:]])
if label == 'Nasion':
nasion = pos
elif label == 'Left':
lpa = pos
elif label == 'Right':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
|
14,142 |
def _build_sig_attr_dict(signals):
if isinstance(signals, dict):
return signals
else:
return dict([(sig, sig) for sig in signals])
|
def _build_sig_attr_dict(signals):
if isinstance(signals, dict):
return signals
else:
return {sig: sig for sig in signals}
|
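The list branch of the helper above is just an identity mapping, which a one-liner makes obvious (the signal names are illustrative):
print({sig: sig for sig in ['clicked', 'toggled']})  # {'clicked': 'clicked', 'toggled': 'toggled'}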
10,789 |
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
from numba.np.ufunc.parallel import get_thread_count
ensure_parallel_support()
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
# We copy the typemap here because for race condition variable we'll
# update their type to array so they can be updated by the gufunc.
orig_typemap = lowerer.fndesc.typemap
# replace original typemap with copy and restore the original at the end.
lowerer.fndesc.typemap = copy.copy(orig_typemap)
typemap = lowerer.fndesc.typemap
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap,
lowerer.func_ir, alias_map, arg_aliases)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params is not None
parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs(
parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
lowerer.func_ir, parfor, parfor.params, lowerer.fndesc.calltypes)
# init reduction array allocation here.
nredvars = len(parfor_redvars)
redarrs = {}
if nredvars > 0:
# reduction arrays outer dimension equal to thread count
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc)
# For each reduction variable...
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[parfor_redvars[i]]
redvar = ir.Var(scope, parfor_redvars[i], loc)
redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
reddtype = redarrvar_typ.dtype
if config.DEBUG_ARRAY_OPT:
print("redvar_typ", redvar_typ, redarrvar_typ, reddtype, types.DType(reddtype))
# If this is reduction over an array,
# the reduction array has just one added per-worker dimension.
if isinstance(redvar_typ, types.npytypes.Array):
redarrdim = redvar_typ.ndim + 1
else:
redarrdim = 1
# Reduction array is created and initialized to the initial reduction value.
# First create a var for the numpy empty ufunc.
empty_bound_node = pfbdr.bind_global_function(
fobj=np.empty,
ftype=get_np_ufunc_typ(np.empty),
args=(
types.UniTuple(types.intp, redarrdim),
types.DType(reddtype),
),
)
# Create var for outer dimension size of reduction array equal to number of threads.
num_threads_var = pfbdr.make_const_variable(
cval=thread_count,
typ=types.intp,
name='num_threads',
)
size_var_list = [num_threads_var]
# If this is a reduction over an array...
if isinstance(redvar_typ, types.npytypes.Array):
# Add code to get the shape of the array being reduced over.
redshape_var = pfbdr.assign(
rhs=ir.Expr.getattr(redvar, "shape", loc),
typ=types.UniTuple(types.intp, redvar_typ.ndim),
name="redarr_shape",
)
                # Add the dimension sizes of the array being reduced over to the tuple of sizes passed to empty.
for j in range(redvar_typ.ndim):
onedimvar = pfbdr.assign(
rhs=ir.Expr.static_getitem(redshape_var, j, None, loc),
typ=types.intp,
name="redshapeonedim",
)
size_var_list.append(onedimvar)
# Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads).
size_var = pfbdr.make_tuple_variable(
size_var_list, name='tuple_size_var',
)
# Add call to empty passing the size var tuple.
empty_call = pfbdr.call(empty_bound_node, args=[size_var])
redarr_var = pfbdr.assign(
rhs=empty_call, typ=redarrvar_typ, name="redarr",
)
# Remember mapping of original reduction array to the newly created per-worker reduction array.
redarrs[redvar.name] = redarr_var
init_val = parfor_reddict[parfor_redvars[i]][0]
if init_val is not None:
if isinstance(redvar_typ, types.npytypes.Array):
# Create an array of identity values for the reduction.
# First, create a variable for np.full.
full_func_node = pfbdr.bind_global_function(
fobj=np.full,
ftype=get_np_ufunc_typ(np.full),
args=(
types.UniTuple(types.intp, redvar_typ.ndim),
reddtype,
types.DType(reddtype),
),
)
                    # Then create a var with the identity value.
init_val_var = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="init_val",
)
# Then, call np.full with the shape of the reduction array and the identity value.
full_call = pfbdr.call(
full_func_node, args=[redshape_var, init_val_var],
)
redtoset = pfbdr.assign(
rhs=full_call,
typ=redvar_typ,
name="redtoset",
)
else:
redtoset = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="redtoset",
)
else:
redtoset = redvar
# For each thread, initialize the per-worker reduction array to the current reduction array value.
for j in range(get_thread_count()):
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
pfbdr.setitem(obj=redarr_var, index=index_var, val=redtoset)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set('error_model', 'numpy')
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfors.parfor.sequential_parfor_lowering = True
try:
func, func_args, func_sig, redargstartdim, func_arg_types = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {},
bool(alias_map), index_var_typ, parfor.races)
finally:
numba.parfors.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ['sched'] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("func_args = ", func_args)
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
print("num_reductions = ", num_reductions)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
redargstartdim,
func_sig,
parfor.races,
typemap)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
func_arg_types,
loop_ranges,
parfor_redvars,
parfor_reddict,
redarrs,
parfor.init_block,
index_var_typ,
parfor.races)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
if nredvars > 0:
# Perform the final reduction across the reduction array created above.
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
# For each reduction variable...
for i in range(nredvars):
name = parfor_redvars[i]
redarr = redarrs[name]
redvar_typ = lowerer.fndesc.typemap[name]
if config.DEBUG_ARRAY_OPT:
print("post-gufunc reduction:", name, redarr, redvar_typ)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbldr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, redarr], vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[redarr.name])
print("res_print", res_print)
lowerer.lower_inst(res_print)
# For each element in the reduction array created above.
for j in range(get_thread_count()):
# Create index var to access that element.
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
# Read that element from the array into oneelem.
oneelemgetitem = pfbdr.getitem(
obj=redarr, index=index_var, typ=redvar_typ,
)
oneelem = pfbdr.assign(
rhs=oneelemgetitem,
typ=redvar_typ,
name="redelem",
)
init_var = pfbdr.assign_inplace(
rhs=oneelem, typ=redvar_typ, name=name + "#init",
)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print1 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print1", res_print)
lowerer.lower_inst(res_print)
# generate code for combining reduction variable with thread output
for inst in parfor_reddict[name][1]:
# If we have a case where a parfor body has an array reduction like A += B
# and A and B have different data types then the reduction in the parallel
                    # region will operate on those differing types. However, here, after the
# parallel region, we are summing across the reduction array and that is
# guaranteed to have the same data type so we need to change the reduction
# nodes so that the right-hand sides have a type equal to the reduction-type
# and therefore the left-hand side.
if isinstance(inst, ir.Assign):
rhs = inst.value
# We probably need to generalize this since it only does substitutions in
# inplace_binops.
if (isinstance(rhs, ir.Expr) and rhs.op == 'inplace_binop' and
rhs.rhs.name == init_var.name):
if config.DEBUG_ARRAY_OPT:
print("Adding call to reduction", rhs)
if rhs.fn == operator.isub:
rhs.fn = operator.iadd
rhs.immutable_fn = operator.add
if rhs.fn == operator.itruediv or rhs.fn == operator.ifloordiv:
rhs.fn = operator.imul
rhs.immutable_fn = operator.mul
if config.DEBUG_ARRAY_OPT:
print("After changing sub to add or div to mul", rhs)
# Get calltype of rhs.
ct = lowerer.fndesc.calltypes[rhs]
assert(len(ct.args) == 2)
# Create new arg types replace the second arg type with the reduction var type.
ctargs = (ct.args[0], redvar_typ)
# Update the signature of the call.
ct = ct.replace(args=ctargs)
# Remove so we can re-insert since calltypes is unique dict.
lowerer.fndesc.calltypes.pop(rhs)
# Add calltype back in for the expr with updated signature.
lowerer.fndesc.calltypes[rhs] = ct
lowerer.lower_inst(inst)
if isinstance(inst, ir.Assign) and name == inst.target.name:
break
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print2 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print2", res_print)
lowerer.lower_inst(res_print)
# Cleanup reduction variable
for v in redarrs.values():
lowerer.lower_inst(ir.Del(v.name, loc=loc))
# Restore the original typemap of the function that was replaced temporarily at the
# Beginning of this function.
lowerer.fndesc.typemap = orig_typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel done")
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
from numba.np.ufunc.parallel import get_thread_count
ensure_parallel_support()
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
# We copy the typemap here because for race condition variable we'll
# update their type to array so they can be updated by the gufunc.
orig_typemap = lowerer.fndesc.typemap
# replace original typemap with copy and restore the original at the end.
lowerer.fndesc.typemap = copy.copy(orig_typemap)
typemap = lowerer.fndesc.typemap
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap,
lowerer.func_ir, alias_map, arg_aliases)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params is not None
parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs(
parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
lowerer.func_ir, parfor, parfor.params, lowerer.fndesc.calltypes)
# init reduction array allocation here.
nredvars = len(parfor_redvars)
redarrs = {}
if nredvars > 0:
# reduction arrays outer dimension equal to thread count
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc)
# For each reduction variable...
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[parfor_redvars[i]]
redvar = ir.Var(scope, parfor_redvars[i], loc)
redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
reddtype = redarrvar_typ.dtype
if config.DEBUG_ARRAY_OPT:
print("redvar_typ", redvar_typ, redarrvar_typ, reddtype, types.DType(reddtype))
# If this is reduction over an array,
# the reduction array has just one added per-worker dimension.
if isinstance(redvar_typ, types.npytypes.Array):
redarrdim = redvar_typ.ndim + 1
else:
redarrdim = 1
# Reduction array is created and initialized to the initial reduction value.
# First create a var for the numpy empty ufunc.
glbl_np_empty = pfbdr.bind_global_function(
fobj=np.empty,
ftype=get_np_ufunc_typ(np.empty),
args=(
types.UniTuple(types.intp, redarrdim),
types.DType(reddtype),
),
)
# Create var for outer dimension size of reduction array equal to number of threads.
num_threads_var = pfbdr.make_const_variable(
cval=thread_count,
typ=types.intp,
name='num_threads',
)
size_var_list = [num_threads_var]
# If this is a reduction over an array...
if isinstance(redvar_typ, types.npytypes.Array):
# Add code to get the shape of the array being reduced over.
redshape_var = pfbdr.assign(
rhs=ir.Expr.getattr(redvar, "shape", loc),
typ=types.UniTuple(types.intp, redvar_typ.ndim),
name="redarr_shape",
)
                # Add the dimension sizes of the array being reduced over to the tuple of sizes passed to empty.
for j in range(redvar_typ.ndim):
onedimvar = pfbdr.assign(
rhs=ir.Expr.static_getitem(redshape_var, j, None, loc),
typ=types.intp,
name="redshapeonedim",
)
size_var_list.append(onedimvar)
# Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads).
size_var = pfbdr.make_tuple_variable(
size_var_list, name='tuple_size_var',
)
# Add call to empty passing the size var tuple.
empty_call = pfbdr.call(empty_bound_node, args=[size_var])
redarr_var = pfbdr.assign(
rhs=empty_call, typ=redarrvar_typ, name="redarr",
)
# Remember mapping of original reduction array to the newly created per-worker reduction array.
redarrs[redvar.name] = redarr_var
init_val = parfor_reddict[parfor_redvars[i]][0]
if init_val is not None:
if isinstance(redvar_typ, types.npytypes.Array):
# Create an array of identity values for the reduction.
# First, create a variable for np.full.
full_func_node = pfbdr.bind_global_function(
fobj=np.full,
ftype=get_np_ufunc_typ(np.full),
args=(
types.UniTuple(types.intp, redvar_typ.ndim),
reddtype,
types.DType(reddtype),
),
)
                    # Then create a var with the identity value.
init_val_var = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="init_val",
)
# Then, call np.full with the shape of the reduction array and the identity value.
full_call = pfbdr.call(
full_func_node, args=[redshape_var, init_val_var],
)
redtoset = pfbdr.assign(
rhs=full_call,
typ=redvar_typ,
name="redtoset",
)
else:
redtoset = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="redtoset",
)
else:
redtoset = redvar
# For each thread, initialize the per-worker reduction array to the current reduction array value.
for j in range(get_thread_count()):
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
pfbdr.setitem(obj=redarr_var, index=index_var, val=redtoset)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set('error_model', 'numpy')
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfors.parfor.sequential_parfor_lowering = True
try:
func, func_args, func_sig, redargstartdim, func_arg_types = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {},
bool(alias_map), index_var_typ, parfor.races)
finally:
numba.parfors.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ['sched'] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("func_args = ", func_args)
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
print("num_reductions = ", num_reductions)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
redargstartdim,
func_sig,
parfor.races,
typemap)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
func_arg_types,
loop_ranges,
parfor_redvars,
parfor_reddict,
redarrs,
parfor.init_block,
index_var_typ,
parfor.races)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
if nredvars > 0:
# Perform the final reduction across the reduction array created above.
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
# For each reduction variable...
for i in range(nredvars):
name = parfor_redvars[i]
redarr = redarrs[name]
redvar_typ = lowerer.fndesc.typemap[name]
if config.DEBUG_ARRAY_OPT:
print("post-gufunc reduction:", name, redarr, redvar_typ)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbldr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, redarr], vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[redarr.name])
print("res_print", res_print)
lowerer.lower_inst(res_print)
# For each element in the reduction array created above.
for j in range(get_thread_count()):
# Create index var to access that element.
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
# Read that element from the array into oneelem.
oneelemgetitem = pfbdr.getitem(
obj=redarr, index=index_var, typ=redvar_typ,
)
oneelem = pfbdr.assign(
rhs=oneelemgetitem,
typ=redvar_typ,
name="redelem",
)
init_var = pfbdr.assign_inplace(
rhs=oneelem, typ=redvar_typ, name=name + "#init",
)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print1 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print1", res_print)
lowerer.lower_inst(res_print)
# generate code for combining reduction variable with thread output
for inst in parfor_reddict[name][1]:
# If we have a case where a parfor body has an array reduction like A += B
# and A and B have different data types then the reduction in the parallel
                    # region will operate on those differing types. However, here, after the
# parallel region, we are summing across the reduction array and that is
# guaranteed to have the same data type so we need to change the reduction
# nodes so that the right-hand sides have a type equal to the reduction-type
# and therefore the left-hand side.
if isinstance(inst, ir.Assign):
rhs = inst.value
# We probably need to generalize this since it only does substitutions in
# inplace_binops.
if (isinstance(rhs, ir.Expr) and rhs.op == 'inplace_binop' and
rhs.rhs.name == init_var.name):
if config.DEBUG_ARRAY_OPT:
print("Adding call to reduction", rhs)
if rhs.fn == operator.isub:
rhs.fn = operator.iadd
rhs.immutable_fn = operator.add
if rhs.fn == operator.itruediv or rhs.fn == operator.ifloordiv:
rhs.fn = operator.imul
rhs.immutable_fn = operator.mul
if config.DEBUG_ARRAY_OPT:
print("After changing sub to add or div to mul", rhs)
# Get calltype of rhs.
ct = lowerer.fndesc.calltypes[rhs]
assert(len(ct.args) == 2)
# Create new arg types replace the second arg type with the reduction var type.
ctargs = (ct.args[0], redvar_typ)
# Update the signature of the call.
ct = ct.replace(args=ctargs)
# Remove so we can re-insert since calltypes is unique dict.
lowerer.fndesc.calltypes.pop(rhs)
# Add calltype back in for the expr with updated signature.
lowerer.fndesc.calltypes[rhs] = ct
lowerer.lower_inst(inst)
if isinstance(inst, ir.Assign) and name == inst.target.name:
break
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print2 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print2", res_print)
lowerer.lower_inst(res_print)
# Cleanup reduction variable
for v in redarrs.values():
lowerer.lower_inst(ir.Del(v.name, loc=loc))
# Restore the original typemap of the function that was replaced temporarily at the
# Beginning of this function.
lowerer.fndesc.typemap = orig_typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel done")
|
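The reduction machinery in the pair above is easier to follow as plain Python: one slot per worker thread, each slot seeded with the reduction identity, and a sequential combine at the end. The sketch below only illustrates that scheme for a sum; the real code emits Numba IR and calls a generated gufunc instead of a Python loop.
import numpy as np
thread_count = 4
identity = 0                                # identity value for a sum reduction
redarr = np.empty(thread_count, dtype=np.int64)
redarr[:] = identity                        # per-worker reduction array, seeded with the identity
data = np.arange(10)
for tid in range(thread_count):             # each "worker" reduces its own chunk into its slot
    redarr[tid] += data[tid::thread_count].sum()
result = identity
for tid in range(thread_count):             # final sequential combine across the per-worker slots
    result += redarr[tid]
print(result == data.sum())                 # True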
45,927 |
def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
    # calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
    # make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
    # slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line with the same shape as the input.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
    # calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
    # make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
    # slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
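A minimal usage sketch for the draw_line pair above. It assumes draw_line itself is in scope (paste it alongside) and substitutes a stand-in for the module's _draw_pixel helper, whose real body is not shown in this row:
import torch
def _draw_pixel(image, x, y, color):
    # assumed stand-in for the module's helper: write `color` at column x, row y
    image[:, y, x] = color
image = torch.zeros(3, 32, 32)              # blank (C, H, W) canvas
p1 = torch.tensor([2, 5])
p2 = torch.tensor([20, 25])
color = torch.tensor([1.0, 0.0, 0.0])
out = draw_line(image, p1, p2, color)       # draw_line as defined above
print(out.shape)                            # torch.Size([3, 32, 32])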
8,553 |
def hostname(dnsname: str) -> str:
"""
Validate the dns name.
:param dnsname: Hostname or FQDN
:returns: dnsname
:raises CX: If the Hostname/FQDN is not a string or in an invalid format.
"""
if not isinstance(dnsname, str):
raise CX("Invalid input, dnsname must be a string")
else:
dnsname = dnsname.strip()
if dnsname == "":
# hostname is not required
return dnsname
if not RE_HOSTNAME.match(dnsname):
raise CX("Invalid hostname format (%s)" % dnsname)
return dnsname
|
def hostname(dnsname: str) -> str:
"""
Validate the DNS name.
:param dnsname: Hostname or FQDN
:returns: dnsname
:raises CX: If the Hostname/FQDN is not a string or in an invalid format.
"""
if not isinstance(dnsname, str):
raise CX("Invalid input, dnsname must be a string")
else:
dnsname = dnsname.strip()
if dnsname == "":
# hostname is not required
return dnsname
if not RE_HOSTNAME.match(dnsname):
raise CX("Invalid hostname format (%s)" % dnsname)
return dnsname
|
30,544 |
def find_indicators_to_limit(indicator_query: str, limit: int, offset: int = 0,
panos_compatible: bool = True, url_port_stripping: bool = False) -> list:
"""
Finds indicators using demisto.searchIndicators
Parameters:
indicator_query (str): Query that determines which indicators to include in
the EDL (Cortex XSOAR indicator query syntax)
limit (int): The maximum number of indicators to include in the EDL
offset (int): The stating index from which to fetch incidents
panos_compatible (bool): Whether to make the indicators PANOS compatible or not
url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not
Returns:
list: The IoCs list up until the amount set by 'limit'
"""
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
offset_in_page = offset - (PAGE_SIZE * next_page)
else:
next_page = 0
offset_in_page = 0
iocs, _ = find_indicators_to_limit_loop(indicator_query, limit, next_page=next_page,
panos_compatible=panos_compatible,
url_port_stripping=url_port_stripping)
# if offset in page is bigger than the amount of results returned return empty list
if len(iocs) <= offset_in_page:
return []
return iocs[offset_in_page:limit + offset_in_page]
|
def find_indicators_to_limit(indicator_query: str, limit: int, offset: int = 0,
panos_compatible: bool = True, url_port_stripping: bool = False) -> list:
"""
Finds indicators using demisto.searchIndicators
Parameters:
indicator_query (str): Query that determines which indicators to include in
the EDL (Cortex XSOAR indicator query syntax)
limit (int): The maximum number of indicators to include in the EDL
offset (int): The starting index from which to fetch incidents
panos_compatible (bool): Whether to make the indicators PANOS compatible or not
url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not
Returns:
list: The IoCs list up until the amount set by 'limit'
"""
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
offset_in_page = offset - (PAGE_SIZE * next_page)
else:
next_page = 0
offset_in_page = 0
iocs, _ = find_indicators_to_limit_loop(indicator_query, limit, next_page=next_page,
panos_compatible=panos_compatible,
url_port_stripping=url_port_stripping)
# if offset in page is bigger than the amount of results returned return empty list
if len(iocs) <= offset_in_page:
return []
return iocs[offset_in_page:limit + offset_in_page]
|
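A worked example of the paging arithmetic used above; the PAGE_SIZE value here is an assumption for illustration, the real constant lives elsewhere in the integration:
PAGE_SIZE = 2000
offset = 4500
next_page = int(offset / PAGE_SIZE)                  # 2   -> start fetching from the third page
offset_in_page = offset - (PAGE_SIZE * next_page)    # 500 -> skip 500 results inside that page
print(next_page, offset_in_page)                     # 2 500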
33,029 |
def make_rpm_list(args: argparse.Namespace, packages: Set[str]) -> Set[str]:
packages = set(packages) # make a copy
if args.bootable:
# Temporary hack: dracut only adds crypto support to the initrd, if the cryptsetup binary is installed
if args.encrypt or args.verity:
packages.add("cryptsetup")
if args.output_format == OutputFormat.gpt_ext4:
packages.add("e2fsprogs")
if args.output_format == OutputFormat.gpt_xfs:
packages.add("xfsprogs")
if args.output_format == OutputFormat.gpt_btrfs:
packages.add("btrfs-progs")
if args.bios_partno:
if args.distribution in (Distribution.mageia, Distribution.openmandriva):
packages.add("grub2")
else:
packages.add("grub2-pc")
return packages
|
def make_rpm_list(args: argparse.Namespace, packages: Set[str]) -> Set[str]:
packages = packages.copy()
if args.bootable:
# Temporary hack: dracut only adds crypto support to the initrd, if the cryptsetup binary is installed
if args.encrypt or args.verity:
packages.add("cryptsetup")
if args.output_format == OutputFormat.gpt_ext4:
packages.add("e2fsprogs")
if args.output_format == OutputFormat.gpt_xfs:
packages.add("xfsprogs")
if args.output_format == OutputFormat.gpt_btrfs:
packages.add("btrfs-progs")
if args.bios_partno:
if args.distribution in (Distribution.mageia, Distribution.openmandriva):
packages.add("grub2")
else:
packages.add("grub2-pc")
return packages
|
50,476 |
def test_imwrite_symbol_name():
name = '''#!~@$%^&`-+{};',.() []_=.jpg'''
imageio.imwrite(name, np.zeros((128, 128, 3), dtype=np.uint8))
im = imageio.imread(name)
assert im.shape == (128, 128, 3)
|
def test_imwrite_symbol_name():
name = '''#!~@$%^&`-+{};',.() []_=.jpg'''
tmp_request = imageio.core.Request(name, "r")
assert tmp_request.extension == ".jpg"
tmp_request.finish()
|
22,504 |
def load_root_component() -> Component:
new_data_yaml = resource_string(__name__.rsplit('.', 1)[0], 'navigation.yml')
navigation_raw = yaml.safe_load(new_data_yaml)
return Component.from_dict("root", navigation_raw)
|
def load_root_component() -> Component:
new_data_yaml = resource_string(__package__, "navigation.yml")
navigation_raw = yaml.safe_load(new_data_yaml)
return Component.from_dict("root", navigation_raw)
|
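The only change in the pair above is how the package name is computed; for a module living inside a package the two spellings yield the same string, as a tiny sketch shows ('navigation.loader' is a made-up module name):
name = 'navigation.loader'                  # what __name__ would be for such a module
print(name.rsplit('.', 1)[0])               # 'navigation' -- the value __package__ already holds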
44,105 |
def tape_to_graph(tape: QuantumTape) -> MultiDiGraph:
"""
Converts a quantum tape to a directed multigraph.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): tape to be converted into a directed multigraph
Returns:
nx.MultiDiGraph: a directed multigraph that captures the circuit structure
of the input tape
**Example**
Consider the following tape:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.9, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
Its corresponding circuit graph can be found using
>>> qml.transforms.qcut.tape_to_graph(tape)
<networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210>
"""
graph = MultiDiGraph()
wire_latest_node = {w: None for w in tape.wires}
for order, op in enumerate(tape.operations):
_add_operator_node(graph, op, order, wire_latest_node)
order += 1 # pylint: disable=undefined-loop-variable
for m in tape.measurements:
obs = getattr(m, "obs", None)
if obs is not None and isinstance(obs, Tensor):
for o in obs.obs:
m_ = MeasurementProcess(m.return_type, obs=o)
_add_operator_node(graph, m_, order, wire_latest_node)
elif m.return_type.name == "Sample":
for w in m.wires.tolist():
s_ = qml.sample(qml.Projector([1], wires=w))
_add_operator_node(graph, s_, order, wire_latest_node)
else:
_add_operator_node(graph, m, order, wire_latest_node)
order += 1
return graph
|
def tape_to_graph(tape: QuantumTape) -> MultiDiGraph:
"""
Converts a quantum tape to a directed multigraph.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): tape to be converted into a directed multigraph
Returns:
nx.MultiDiGraph: a directed multigraph that captures the circuit structure
of the input tape
**Example**
Consider the following tape:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.9, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
Its corresponding circuit graph can be found using
>>> qml.transforms.qcut.tape_to_graph(tape)
<networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210>
"""
graph = MultiDiGraph()
wire_latest_node = {w: None for w in tape.wires}
for order, op in enumerate(tape.operations):
_add_operator_node(graph, op, order, wire_latest_node)
order += 1 # pylint: disable=undefined-loop-variable
for m in tape.measurements:
obs = getattr(m, "obs", None)
if obs is not None and isinstance(obs, Tensor):
for o in obs.obs:
m_ = MeasurementProcess(m.return_type, obs=o)
_add_operator_node(graph, m_, order, wire_latest_node)
elif m.return_type is Sample and obs is None:
for w in m.wires.tolist():
s_ = qml.sample(qml.Projector([1], wires=w))
_add_operator_node(graph, s_, order, wire_latest_node)
else:
_add_operator_node(graph, m, order, wire_latest_node)
order += 1
return graph
|
7,515 |
def download_file(remote_url, cache=False, show_progress=True, timeout=None,
sources=None, pkgname='astropy', http_headers=None,
ftp_tls=False):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds. Otherwise, use
`astropy.utils.data.Conf.remote_timeout`. Set this to zero to prevent
any attempt to download anything.
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ftp_tls : bool
If True, use TLS with ftp URLs instead of the standard unsecured FTP.
Certain servers require this.
Returns
-------
local_path : str
Returns the local path that the file was download to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because `download_file` returns a filename, another process could run
clear_download_cache before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {'User-Agent': conf.default_http_user_agent,
'Accept': '*/*'}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
cache = False
missing_cache = (
"Cache directory cannot be read or created, "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(f"Cache value '{cache}' was requested but "
f"'update' is the only recognized string; "
f"otherwise use a boolean")
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=ftp_tls)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (hasattr(e, 'reason')
and hasattr(e.reason, 'errno')
and e.reason.errno == 8):
e.reason.strerror = (e.reason.strerror +
'. requested URL: '
+ remote_url)
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
except socket.timeout as e:
# this isn't supposed to happen, but occasionally a socket.timeout
# gets through. It's supposed to be caught in urllib and raised
# in this way, but for some reason in mysterious circumstances it
# doesn't (or didn't in python2?). So we'll just re-raise it here
# instead.
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
f"Please include primary URL in sources if you want it to be "
f"included as a valid source.")
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}") \
from errors[sources[0]]
if cache:
try:
return import_file_to_cache(url_key, f_name,
remove_original=True,
replace=(cache == 'update'),
pkgname=pkgname)
except PermissionError:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only, unable to import "
f"downloaded file, providing data in temporary file {f_name} "
f"instead.")
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
|
def download_file(remote_url, cache=False, show_progress=True, timeout=None,
sources=None, pkgname='astropy', http_headers=None,
ftp_tls=False):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds. Otherwise, use
`astropy.utils.data.Conf.remote_timeout`. Set this to zero to prevent
any attempt to download anything.
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ftp_tls : bool
If True, use TLS with ftp URLs instead of the standard unsecured FTP.
Certain servers require this.
Returns
-------
local_path : str
Returns the local path that the file was download to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
clear_download_cache before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {'User-Agent': conf.default_http_user_agent,
'Accept': '*/*'}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
cache = False
missing_cache = (
"Cache directory cannot be read or created, "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(f"Cache value '{cache}' was requested but "
f"'update' is the only recognized string; "
f"otherwise use a boolean")
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=ftp_tls)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (hasattr(e, 'reason')
and hasattr(e.reason, 'errno')
and e.reason.errno == 8):
e.reason.strerror = (e.reason.strerror +
'. requested URL: '
+ remote_url)
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
except socket.timeout as e:
# this isn't supposed to happen, but occasionally a socket.timeout
# gets through. It's supposed to be caught in urllib and raised
# in this way, but for some reason in mysterious circumstances it
# doesn't (or didn't in python2?). So we'll just re-raise it here
# instead.
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
f"Please include primary URL in sources if you want it to be "
f"included as a valid source.")
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}") \
from errors[sources[0]]
if cache:
try:
return import_file_to_cache(url_key, f_name,
remove_original=True,
replace=(cache == 'update'),
pkgname=pkgname)
except PermissionError:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only, unable to import "
f"downloaded file, providing data in temporary file {f_name} "
f"instead.")
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
|
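For orientation, a typical call to this astropy utility looks like the sketch below; the URL is a placeholder and network access is needed on a cache miss:
from astropy.utils.data import download_file
url = 'https://example.com/data.fits'       # placeholder URL, not a real dataset
local_path = download_file(url, cache=True, show_progress=False)
print(local_path)                           # a path inside the astropy download cache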