id (int64, 11–59.9k) | original (stringlengths 33–150k) | modified (stringlengths 37–150k) |
---|---|---|
31,182 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
base_url = demisto.params()['url']
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.log(f'Command being called is \'{demisto.command()}\'')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
base_url = demisto.params()['url']
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.info(f'Command being called is {demisto.command()}')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
13,858 |
def test_not_an_image(tmp_path):
fname = os.path.join(tmp_path, "notanimage.bsdf")
# Not an image not a list
bsdf.save(fname, 1)
with raises(RuntimeError):
imageio.imread(fname)
# A list with non-images
bsdf.save(fname, [1])
with raises(RuntimeError):
imageio.imread(fname)
# An empty list could work though
bsdf.save(fname, [])
with raises(IndexError):
imageio.imread(fname)
assert imageio.mimread(fname) == []
|
def test_not_an_image(tmp_path):
fname = tmp_path / "notanimage.bsdf"
# Not an image not a list
bsdf.save(fname, 1)
with raises(RuntimeError):
imageio.imread(fname)
# A list with non-images
bsdf.save(fname, [1])
with raises(RuntimeError):
imageio.imread(fname)
# An empty list could work though
bsdf.save(fname, [])
with raises(IndexError):
imageio.imread(fname)
assert imageio.mimread(fname) == []
|
6,976 |
def execute():
child_tables = [
r[0]
for r in frappe.get_all(
"DocField",
fields="options",
filters={"fieldtype": ["in", frappe.model.table_fields], "parent": "Workspace"},
as_list=1,
)
]
for child_table in child_tables:
module = get_doctype_module(child_table).lower()
frappe.reload_doc(module, "doctype", child_table, force=True)
|
def execute():
child_tables = [
r[0]
for r in frappe.get_all(
"DocField",
fields="options",
filters={"fieldtype": ["in", frappe.model.table_fields], "parent": "Workspace"},
as_list=1,
)
]
for child_table in child_tables:
module = get_doctype_module(child_table).lower()
frappe.reload_doctype(child_table, force=True)
|
11,036 |
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import django.__main__
django_main_path = Path(django.__main__.__file__)
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if py_script == django_main_path:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
# TODO: Remove str() when dropping support for PY37.
# arg parameter accepts path-like on Windows from Python 3.8.
return [str(exe_entrypoint), *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
# TODO: Remove str() when dropping support for PY37.
# arg parameter accepts path-like on Windows from Python 3.8.
return [*args, str(script_entrypoint), *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import django.__main__
django_main_path = Path(django.__main__.__file__)
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if py_script == django_main_path:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
# TODO: Remove str() when dropping support for PY37.
# arg parameter accepts path-like on Windows from Python 3.8.
return [str(exe_entrypoint), *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
# TODO: Remove str() when dropping support for PY37.
# args parameter accepts path-like on Windows from Python 3.8.
return [*args, str(script_entrypoint), *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
24,973 |
def _get_ansi_code(msg_style: MessageStyle) -> str:
"""Return ANSI escape code corresponding to color and style.
:param msg_style: the message style
:raise KeyError: if a non-existent color or style identifier is given
:return: the built escape code
"""
ansi_code = [ANSI_STYLES[effect] for effect in msg_style.style]
if msg_style.color:
if msg_style.color.isdigit():
ansi_code.extend(["38", "5"])
ansi_code.append(msg_style.color)
else:
ansi_code.append(ANSI_COLORS[msg_style.color])
if ansi_code:
return ANSI_PREFIX + ";".join(ansi_code) + ANSI_END
return ""
|
def _get_ansi_code(msg_style: MessageStyle) -> str:
"""Return ANSI escape code corresponding to color and style.
:param msg_style: the message style
:raise KeyError: if a nonexistent color or style identifier is given
:return: the built escape code
"""
ansi_code = [ANSI_STYLES[effect] for effect in msg_style.style]
if msg_style.color:
if msg_style.color.isdigit():
ansi_code.extend(["38", "5"])
ansi_code.append(msg_style.color)
else:
ansi_code.append(ANSI_COLORS[msg_style.color])
if ansi_code:
return ANSI_PREFIX + ";".join(ansi_code) + ANSI_END
return ""
|
40,766 |
def supervised_training_step_amp(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
scaler: Optional["torch.cuda.amp.GradScaler"] = None,
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch.cuda.amp``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
scaler: GradScaler instance for gradient scaling. (default: None)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function
Example::
from ignite.engine import Engine, supervised_training_step_amp
model = ...
optimizer = ...
loss_fn = ...
scaler = torch.cuda.amp.GradScaler(2**10)
update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionadded:: 0.5.0
Added Gradient Accumulation.
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
y_pred = model(x)
loss = loss_fn(y_pred, y) / gradient_accumulation_steps
if scaler:
scaler.scale(loss).backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output_transform(x, y, y_pred, loss)
return update
|
def supervised_training_step_amp(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
scaler: Optional["torch.cuda.amp.GradScaler"] = None,
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch.cuda.amp``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
scaler: GradScaler instance for gradient scaling. (default: None)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function
Example::
from ignite.engine import Engine, supervised_training_step_amp
model = ...
optimizer = ...
loss_fn = ...
scaler = torch.cuda.amp.GradScaler(2**10)
update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.5.0
Added Gradient Accumulation.
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
y_pred = model(x)
loss = loss_fn(y_pred, y) / gradient_accumulation_steps
if scaler:
scaler.scale(loss).backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output_transform(x, y, y_pred, loss)
return update
|
29,719 |
def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
handle: ``pyvnml.nvml.LP_struct_c_nvmlDevice_t``
NVML handle to CUDA device
Returns
-------
out: bool
``True`` if device handle's has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return True
return False
|
def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
handle : pyvnml.nvml.LP_struct_c_nvmlDevice_t
NVML handle to CUDA device
Returns
-------
out: bool
``True`` if device handle's has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return True
return False
|
56,682 |
def test_author_dates_match():
_atype = {'key': '/type/author'}
basic = {
'name': 'John Smith',
'death_date': '1688',
'key': '/a/OL6398451A',
'birth_date': '1650',
'type': _atype,
}
full_dates = {
'name': 'John Smith',
'death_date': '23 June 1688',
'key': '/a/OL6398452A',
'birth_date': '01 January 1650',
'type': _atype,
}
full_different = {
'name': 'John Smith',
'death_date': '12 June 1688',
'key': '/a/OL6398453A',
'birth_date': '01 December 1650',
'type': _atype,
}
no_death = {
'name': 'John Smith',
'key': '/a/OL6398454A',
'birth_date': '1650',
'type': _atype,
}
no_dates = {'name': 'John Smith', 'key': '/a/OL6398455A', 'type': _atype}
non_match = {
'name': 'John Smith',
'death_date': '1999',
'key': '/a/OL6398456A',
'birth_date': '1950',
'type': _atype,
}
different_name = {'name': 'Jane Farrier', 'key': '/a/OL6398457A', 'type': _atype}
assert author_dates_match(basic, basic)
assert author_dates_match(basic, full_dates)
assert author_dates_match(basic, no_death)
assert author_dates_match(basic, no_dates)
assert author_dates_match(no_dates, no_dates)
assert author_dates_match(
no_dates, non_match
) # Without dates, the match returns True
assert author_dates_match(
no_dates, different_name
) # This method only compares dates and ignores names
assert author_dates_match(basic, non_match) is False
# FIXME: the following should properly be False:
assert author_dates_match(
full_different, full_dates
) # this shows matches are only occurring on year, full dates are ignored!
|
def test_author_dates_match():
_atype = {'key': '/type/author'}
basic = {
'name': 'John Smith',
'death_date': '1688',
'key': '/a/OL6398451A',
'birth_date': '1650',
'type': _atype,
}
full_dates = {
'name': 'John Smith',
'death_date': '23 June 1688',
'key': '/a/OL6398452A',
'birth_date': '01 January 1650',
'type': _atype,
}
full_different = {
'name': 'John Smith',
'death_date': '12 June 1688',
'key': '/a/OL6398453A',
'birth_date': '01 December 1650',
'type': _atype,
}
no_death = {
'name': 'John Smith',
'key': '/a/OL6398454A',
'birth_date': '1650',
'type': _atype,
}
no_dates = {'name': 'John Smith', 'key': '/a/OL6398455A', 'type': _atype}
non_match = {
'name': 'John Smith',
'death_date': '1999',
'key': '/a/OL6398456A',
'birth_date': '1950',
'type': _atype,
}
different_name = {'name': 'Jane Farrier', 'key': '/a/OL6398457A', 'type': _atype}
assert author_dates_match(basic, basic)
assert author_dates_match(basic, full_dates)
assert author_dates_match(basic, no_death)
assert author_dates_match(basic, no_dates)
assert author_dates_match(no_dates, no_dates)
# Without dates, the match returns True
assert author_dates_match(no_dates, non_match)
# This method only compares dates and ignores names
assert author_dates_match(no_dates, different_name)
assert author_dates_match(basic, non_match) is False
# FIXME: the following should properly be False:
assert author_dates_match(
full_different, full_dates
) # this shows matches are only occurring on year, full dates are ignored!
|
27,467 |
def delete_registry(
service_account_json, project_id, cloud_region, registry_id):
"""Deletes the specified registry."""
# [START iot_delete_registry]
print('Delete registry')
client = iot_v1.DeviceManagerClient()
registry_path = client.registry_path(project_id, cloud_region, registry_id)
try:
response = client.delete_device_registry(registry_path)
print('Deleted registry')
return 'Registry deleted'
except HttpError:
print('Error, registry not deleted')
return 'Registry not deleted'
# [END iot_delete_registry]
|
def delete_registry(
service_account_json, project_id, cloud_region, registry_id):
"""Deletes the specified registry."""
# [START iot_delete_registry]
print('Delete registry')
client = iot_v1.DeviceManagerClient()
registry_path = client.registry_path(project_id, cloud_region, registry_id)
try:
client.delete_device_registry(registry_path)
print('Deleted registry')
return 'Registry deleted'
except HttpError:
print('Error, registry not deleted')
return 'Registry not deleted'
# [END iot_delete_registry]
|
14,779 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)])
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config[CONF_PASSWORD]
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)])
return True
|
10,564 |
def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
module = AnsibleModule(
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
argv=dict(type='list', elements='str'),
chdir=dict(type='path'),
executable=dict(),
creates=dict(type='path'),
removes=dict(type='path'),
changes=dict(type='path'),
# The default for this really comes from the action plugin
warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='ansible.builtin'),
stdin=dict(required=False),
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
),
supports_check_mode=True,
)
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['_raw_params']
argv = module.params['argv']
creates = module.params['creates']
removes = module.params['removes']
changes = module.params['changes']
warn = module.params['warn']
stdin = module.params['stdin']
stdin_add_newline = module.params['stdin_add_newline']
strip = module.params['strip_empty_ends']
if not shell and executable:
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
if (not args or args.strip() == '') and not argv:
module.fail_json(rc=256, msg="no command given")
if args and argv:
module.fail_json(rc=256, msg="only command or argv can be given, not both")
if not shell and args:
args = shlex.split(args)
args = args or argv
# All args must be strings
if is_iterable(args, include_strings=False):
args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
if chdir:
try:
chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
except ValueError as e:
module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))
try:
os.chdir(chdir)
except (IOError, OSError) as e:
module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if glob.glob(creates):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not glob.glob(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
rc=0
)
if changes:
# do not run the command if the line contains changes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not glob.glob(changes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % changes,
changed=False,
rc=0
)
else:
basehashes = []
for filename in glob.glob(changes):
basefile = open(filename, 'rb')
basedata = basefile.read(65536)
basehash = hashlib.sha1(basedata).hexdigest()
basehashes.append(basehash)
if warn:
check_command(module, args)
changed = True
startd = datetime.datetime.now()
if not module.check_mode:
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
elif creates or removes or changes:
rc = 0
out = err = b'Command would have run if not in check mode'
else:
module.exit_json(msg="skipped, running in check mode", skipped=True)
endd = datetime.datetime.now()
delta = endd - startd
if strip:
out = out.rstrip(b"\r\n")
err = err.rstrip(b"\r\n")
if changes:
finalhashes = []
for filename in glob.glob(changes):
finalfile = open(filename, 'rb')
finaldata = finalfile.read(65536)
finalhash = hashlib.sha1(finaldata).hexdigest()
finalhashes.append(finalhash)
if finalhashes == basehashes:
changed = False
result = dict(
cmd=args,
stdout=out,
stderr=err,
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=changed,
)
if rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
|
def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
module = AnsibleModule(
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
argv=dict(type='list', elements='str'),
chdir=dict(type='path'),
executable=dict(),
creates=dict(type='path'),
removes=dict(type='path'),
changes=dict(type='path'),
# The default for this really comes from the action plugin
warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='ansible.builtin'),
stdin=dict(required=False),
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
),
supports_check_mode=True,
)
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['_raw_params']
argv = module.params['argv']
creates = module.params['creates']
removes = module.params['removes']
changes = module.params['changes']
warn = module.params['warn']
stdin = module.params['stdin']
stdin_add_newline = module.params['stdin_add_newline']
strip = module.params['strip_empty_ends']
if not shell and executable:
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
if (not args or args.strip() == '') and not argv:
module.fail_json(rc=256, msg="no command given")
if args and argv:
module.fail_json(rc=256, msg="only command or argv can be given, not both")
if not shell and args:
args = shlex.split(args)
args = args or argv
# All args must be strings
if is_iterable(args, include_strings=False):
args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
if chdir:
try:
chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
except ValueError as e:
module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))
try:
os.chdir(chdir)
except (IOError, OSError) as e:
module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if glob.glob(creates):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not glob.glob(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
rc=0
)
if changes:
# Do not run the command and exit now if the file does not exist
if not glob.glob(changes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % changes,
changed=False,
rc=0
)
else:
basehashes = []
for filename in glob.glob(changes):
basefile = open(filename, 'rb')
basedata = basefile.read(65536)
basehash = hashlib.sha1(basedata).hexdigest()
basehashes.append(basehash)
if warn:
check_command(module, args)
changed = True
startd = datetime.datetime.now()
if not module.check_mode:
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
elif creates or removes or changes:
rc = 0
out = err = b'Command would have run if not in check mode'
else:
module.exit_json(msg="skipped, running in check mode", skipped=True)
endd = datetime.datetime.now()
delta = endd - startd
if strip:
out = out.rstrip(b"\r\n")
err = err.rstrip(b"\r\n")
if changes:
finalhashes = []
for filename in glob.glob(changes):
finalfile = open(filename, 'rb')
finaldata = finalfile.read(65536)
finalhash = hashlib.sha1(finaldata).hexdigest()
finalhashes.append(finalhash)
if finalhashes == basehashes:
changed = False
result = dict(
cmd=args,
stdout=out,
stderr=err,
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=changed,
)
if rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
|
4,477 |
def get_fill_colors(cols, n_fill):
"""Get the fill colors for the middle of divergent colormaps."""
steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0),
axis=1)
ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3)
if ind.size > 0:
# choose the two colors between which there is the large step
ind = ind[0] + 1
fillcols = np.r_[np.tile(cols[ind, :], (int(n_fill / 2), 1)),
np.tile(cols[ind + 1, :],
(n_fill - int(n_fill / 2), 1))]
else:
# choose a color from the middle of the colormap
fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1))
return fillcols
|
def get_fill_colors(cols, n_fill):
"""Get the fill colors for the middle of divergent colormaps."""
steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0),
axis=1)
ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3)
if ind.size > 0:
# choose the two colors between which there is the large step
ind = ind[0] + 1
fillcols = np.r_[np.tile(cols[ind, :], (int(n_fill / 2), 1)),
np.tile(cols[ind + 1, :],
(n_fill - n_fill // 2, 1))]
else:
# choose a color from the middle of the colormap
fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1))
return fillcols
|
5,419 |
def _parse_interfaces(interface_files=None):
"""
Parse /etc/network/interfaces and return current configured interfaces
"""
if interface_files is None:
interface_files = []
# Add this later.
if os.path.exists(_DEB_NETWORK_DIR):
interface_files += [
"{}/{}".format(_DEB_NETWORK_DIR, dir)
for dir in os.listdir(_DEB_NETWORK_DIR)
]
if os.path.isfile(_DEB_NETWORK_FILE):
interface_files.insert(0, _DEB_NETWORK_FILE)
adapters = salt.utils.odict.OrderedDict()
method = -1
for interface_file in interface_files:
with salt.utils.files.fopen(interface_file) as interfaces:
# This ensures iface_dict exists, but does not ensure we're not reading a new interface.
iface_dict = {}
for line in interfaces:
line = salt.utils.stringutils.to_unicode(line)
# Identify the clauses by the first word of each line.
# Go to the next line if the current line is a comment
# or all spaces.
if line.lstrip().startswith("#") or line.isspace():
continue
# Parse the iface clause
if line.startswith("iface"):
sline = line.split()
if len(sline) != 4:
msg = "Interface file malformed: {0}."
msg = msg.format(sline)
log.error(msg)
raise AttributeError(msg)
iface_name = sline[1]
addrfam = sline[2]
method = sline[3]
# Create item in dict, if not already there
if iface_name not in adapters:
adapters[iface_name] = salt.utils.odict.OrderedDict()
# Create item in dict, if not already there
if "data" not in adapters[iface_name]:
adapters[iface_name]["data"] = salt.utils.odict.OrderedDict()
if addrfam not in adapters[iface_name]["data"]:
adapters[iface_name]["data"][
addrfam
] = salt.utils.odict.OrderedDict()
iface_dict = adapters[iface_name]["data"][addrfam]
iface_dict["addrfam"] = addrfam
iface_dict["proto"] = method
iface_dict["filename"] = interface_file
# Parse the detail clauses.
elif line[0].isspace():
sline = line.split()
# conf file attr: dns-nameservers
# salt states.network attr: dns
attr, valuestr = line.rstrip().split(None, 1)
if _attrmaps_contain_attr(attr):
if "-" in attr:
attrname = attr.replace("-", "_")
else:
attrname = attr
(valid, value, errmsg) = _validate_interface_option(
attr, valuestr, addrfam
)
if attrname == "address" and "address" in iface_dict:
if "addresses" not in iface_dict:
iface_dict["addresses"] = []
iface_dict["addresses"].append(value)
else:
iface_dict[attrname] = value
elif attr in _REV_ETHTOOL_CONFIG_OPTS:
if "ethtool" not in iface_dict:
iface_dict["ethtool"] = salt.utils.odict.OrderedDict()
iface_dict["ethtool"][attr] = valuestr
elif attr.startswith("bond"):
opt = re.split(r"[_-]", attr, maxsplit=1)[1]
if "bonding" not in iface_dict:
iface_dict["bonding"] = salt.utils.odict.OrderedDict()
iface_dict["bonding"][opt] = valuestr
elif attr.startswith("bridge"):
opt = re.split(r"[_-]", attr, maxsplit=1)[1]
if "bridging" not in iface_dict:
iface_dict["bridging"] = salt.utils.odict.OrderedDict()
iface_dict["bridging"][opt] = valuestr
elif attr in [
"up",
"pre-up",
"post-up",
"down",
"pre-down",
"post-down",
]:
cmd = valuestr
cmd_key = "{}_cmds".format(re.sub("-", "_", attr))
if cmd_key not in iface_dict:
iface_dict[cmd_key] = []
iface_dict[cmd_key].append(cmd)
elif line.startswith("auto"):
for word in line.split()[1:]:
if word not in adapters:
adapters[word] = salt.utils.odict.OrderedDict()
adapters[word]["enabled"] = True
elif line.startswith("allow-hotplug"):
for word in line.split()[1:]:
if word not in adapters:
adapters[word] = salt.utils.odict.OrderedDict()
adapters[word]["hotplug"] = True
elif line.startswith("source"):
if "source" not in adapters:
adapters["source"] = salt.utils.odict.OrderedDict()
# Create item in dict, if not already there
if "data" not in adapters["source"]:
adapters["source"]["data"] = salt.utils.odict.OrderedDict()
adapters["source"]["data"]["sources"] = []
adapters["source"]["data"]["sources"].append(line.split()[1])
# Return a sorted list of the keys for bond, bridge and ethtool options to
# ensure a consistent order
malformed_interfaces = set()
for iface_name in adapters:
if iface_name == "source":
continue
if "data" not in adapters[iface_name]:
msg = "Interface file malformed for interface: {}.".format(iface_name)
log.error(msg)
malformed_interfaces.add(iface_name)
continue
for opt in ["ethtool", "bonding", "bridging"]:
for inet in ["inet", "inet6"]:
if inet in adapters[iface_name]["data"]:
if opt in adapters[iface_name]["data"][inet]:
opt_keys = sorted(
adapters[iface_name]["data"][inet][opt].keys()
)
adapters[iface_name]["data"][inet][opt + "_keys"] = opt_keys
for malformed_interface in malformed_interfaces:
del adapters[malformed_interface]
return adapters
|
def _parse_interfaces(interface_files=None):
"""
Parse /etc/network/interfaces and return current configured interfaces
"""
if interface_files is None:
interface_files = []
# Add this later.
if os.path.exists(_DEB_NETWORK_DIR):
interface_files += [
"{}/{}".format(_DEB_NETWORK_DIR, dir)
for dir in os.listdir(_DEB_NETWORK_DIR)
]
if os.path.isfile(_DEB_NETWORK_FILE):
interface_files.insert(0, _DEB_NETWORK_FILE)
adapters = salt.utils.odict.OrderedDict()
method = -1
for interface_file in interface_files:
with salt.utils.files.fopen(interface_file) as interfaces:
# This ensures iface_dict exists, but does not ensure we're not reading a new interface.
iface_dict = {}
for line in interfaces:
line = salt.utils.stringutils.to_unicode(line)
# Identify the clauses by the first word of each line.
# Go to the next line if the current line is a comment
# or all spaces.
if line.lstrip().startswith("#") or line.isspace():
continue
# Parse the iface clause
if line.startswith("iface"):
sline = line.split()
if len(sline) != 4:
msg = "Interface file malformed: {0}."
msg = msg.format(sline)
log.error(msg)
raise AttributeError(msg)
iface_name = sline[1]
addrfam = sline[2]
method = sline[3]
# Create item in dict, if not already there
if iface_name not in adapters:
adapters[iface_name] = salt.utils.odict.OrderedDict()
# Create item in dict, if not already there
if "data" not in adapters[iface_name]:
adapters[iface_name]["data"] = salt.utils.odict.OrderedDict()
if addrfam not in adapters[iface_name]["data"]:
adapters[iface_name]["data"][
addrfam
] = salt.utils.odict.OrderedDict()
iface_dict = adapters[iface_name]["data"][addrfam]
iface_dict["addrfam"] = addrfam
iface_dict["proto"] = method
iface_dict["filename"] = interface_file
# Parse the detail clauses.
elif line[0].isspace():
sline = line.split()
# conf file attr: dns-nameservers
# salt states.network attr: dns
attr, valuestr = line.rstrip().split(None, 1)
if _attrmaps_contain_attr(attr):
if "-" in attr:
attrname = attr.replace("-", "_")
else:
attrname = attr
(valid, value, errmsg) = _validate_interface_option(
attr, valuestr, addrfam
)
if attrname == "address" and "address" in iface_dict:
if "addresses" not in iface_dict:
iface_dict["addresses"] = []
iface_dict["addresses"].append(value)
else:
iface_dict[attrname] = value
elif attr in _REV_ETHTOOL_CONFIG_OPTS:
if "ethtool" not in iface_dict:
iface_dict["ethtool"] = salt.utils.odict.OrderedDict()
iface_dict["ethtool"][attr] = valuestr
elif attr.startswith("bond"):
opt = re.split(r"[_-]", attr, maxsplit=1)[1]
if "bonding" not in iface_dict:
iface_dict["bonding"] = salt.utils.odict.OrderedDict()
iface_dict["bonding"][opt] = valuestr
elif attr.startswith("bridge"):
opt = re.split(r"[_-]", attr, maxsplit=1)[1]
if "bridging" not in iface_dict:
iface_dict["bridging"] = salt.utils.odict.OrderedDict()
iface_dict["bridging"][opt] = valuestr
elif attr in [
"up",
"pre-up",
"post-up",
"down",
"pre-down",
"post-down",
]:
cmd = valuestr
cmd_key = "{}_cmds".format(re.sub("-", "_", attr))
if cmd_key not in iface_dict:
iface_dict[cmd_key] = []
iface_dict[cmd_key].append(cmd)
elif line.startswith("auto"):
for word in line.split()[1:]:
if word not in adapters:
adapters[word] = salt.utils.odict.OrderedDict()
adapters[word]["enabled"] = True
elif line.startswith("allow-hotplug"):
for word in line.split()[1:]:
if word not in adapters:
adapters[word] = salt.utils.odict.OrderedDict()
adapters[word]["hotplug"] = True
elif line.startswith("source"):
if "source" not in adapters:
adapters["source"] = salt.utils.odict.OrderedDict()
# Create item in dict, if not already there
if "data" not in adapters["source"]:
adapters["source"]["data"] = salt.utils.odict.OrderedDict()
adapters["source"]["data"]["sources"] = []
adapters["source"]["data"]["sources"].append(line.split()[1])
# Return a sorted list of the keys for bond, bridge and ethtool options to
# ensure a consistent order
malformed_interfaces = set()
for iface_name in list(adapters):
if iface_name == "source":
continue
if "data" not in adapters[iface_name]:
msg = "Interface file malformed for interface: {}.".format(iface_name)
log.error(msg)
malformed_interfaces.add(iface_name)
continue
for opt in ["ethtool", "bonding", "bridging"]:
for inet in ["inet", "inet6"]:
if inet in adapters[iface_name]["data"]:
if opt in adapters[iface_name]["data"][inet]:
opt_keys = sorted(
adapters[iface_name]["data"][inet][opt].keys()
)
adapters[iface_name]["data"][inet][opt + "_keys"] = opt_keys
for malformed_interface in malformed_interfaces:
del adapters[malformed_interface]
return adapters
|
7,703 |
def check_person_link_email(event, email):
"""Check whether an email can be used in a person link.
:param event: The event
:param email: The email address
"""
email = email.lower().strip()
person = event.persons.filter_by(email=email).first()
user = get_user_by_email(email)
if person is not None:
return dict(status='warning', conflict='person-already-exists', person_name=person.name)
elif user:
return dict(status='warning', conflict='user-already-exists')
email_err = validate_email_verbose(email)
if email_err:
return dict(status='error', conflict='email-invalid', email_error=email_err)
else:
return dict(status='ok')
|
def check_person_link_email(event, email):
"""Check whether an email can be used in a person link.
:param event: The event
:param email: The email address
"""
email = email.lower().strip()
person = event.persons.filter_by(email=email).first()
user = get_user_by_email(email)
if person is not None:
return dict(status='warning', conflict='person-already-exists', person_name=person.name)
elif user:
return dict(status='warning', conflict='user-already-exists')
if email_err := validate_email_verbose(email):
return dict(status='error', conflict='email-invalid', email_error=email_err)
else:
return dict(status='ok')
|
14,693 |
def setup(hass, config):
"""Set up the Netatmo devices."""
import pyatmo
hass.data[DATA_PERSONS] = {}
try:
conf = pyatmo.ClientAuth(
config[DOMAIN][CONF_API_KEY], config[DOMAIN][CONF_SECRET_KEY],
config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD],
'read_station read_camera access_camera '
'read_thermostat write_thermostat '
'read_presence access_presence read_homecoach')
except HTTPError:
_LOGGER.error("Unable to connect to Netatmo API")
return False
if config[DOMAIN][CONF_DISCOVERY]:
for component in 'camera', 'sensor', 'binary_sensor', 'climate':
discovery.load_platform(hass, component, DOMAIN, {}, config)
if config[DOMAIN][CONF_WEBHOOKS]:
webhook_id = hass.components.webhook.async_generate_id()
hass.data[
DATA_WEBHOOK_URL] = hass.components.webhook.async_generate_url(
webhook_id)
hass.components.webhook.async_register(
DOMAIN, 'Netatmo', webhook_id, handle_webhook)
conf.addwebhook(hass.data[DATA_WEBHOOK_URL])
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP, dropwebhook)
def _service_addwebhook(service):
"""Service to (re)add webhooks during runtime."""
url = service.data.get(CONF_URL)
if url is None:
url = hass.data[DATA_WEBHOOK_URL]
_LOGGER.info("Adding webhook for URL: %s", url)
conf.addwebhook(url)
hass.services.register(
DOMAIN, SERVICE_ADDWEBHOOK, _service_addwebhook,
schema=SCHEMA_SERVICE_ADDWEBHOOK)
def _service_dropwebhook(service):
"""Service to drop webhooks during runtime."""
_LOGGER.info("Dropping webhook")
conf.dropwebhook()
hass.services.register(
DOMAIN, SERVICE_DROPWEBHOOK, _service_dropwebhook,
schema=SCHEMA_SERVICE_DROPWEBHOOK)
# Store config to be used during entry setup
hass.data[DATA_NETATMO_CONFIG] = conf
return True
|
def setup(hass, config):
"""Set up the Netatmo devices."""
import pyatmo
hass.data[DATA_PERSONS] = {}
try:
auth = pyatmo.ClientAuth(
config[DOMAIN][CONF_API_KEY], config[DOMAIN][CONF_SECRET_KEY],
config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD],
'read_station read_camera access_camera '
'read_thermostat write_thermostat '
'read_presence access_presence read_homecoach')
except HTTPError:
_LOGGER.error("Unable to connect to Netatmo API")
return False
if config[DOMAIN][CONF_DISCOVERY]:
for component in 'camera', 'sensor', 'binary_sensor', 'climate':
discovery.load_platform(hass, component, DOMAIN, {}, config)
if config[DOMAIN][CONF_WEBHOOKS]:
webhook_id = hass.components.webhook.async_generate_id()
hass.data[
DATA_WEBHOOK_URL] = hass.components.webhook.async_generate_url(
webhook_id)
hass.components.webhook.async_register(
DOMAIN, 'Netatmo', webhook_id, handle_webhook)
auth.addwebhook(hass.data[DATA_WEBHOOK_URL])
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP, dropwebhook)
def _service_addwebhook(service):
"""Service to (re)add webhooks during runtime."""
url = service.data.get(CONF_URL)
if url is None:
url = hass.data[DATA_WEBHOOK_URL]
_LOGGER.info("Adding webhook for URL: %s", url)
auth.addwebhook(url)
hass.services.register(
DOMAIN, SERVICE_ADDWEBHOOK, _service_addwebhook,
schema=SCHEMA_SERVICE_ADDWEBHOOK)
def _service_dropwebhook(service):
"""Service to drop webhooks during runtime."""
_LOGGER.info("Dropping webhook")
auth.dropwebhook()
hass.services.register(
DOMAIN, SERVICE_DROPWEBHOOK, _service_dropwebhook,
schema=SCHEMA_SERVICE_DROPWEBHOOK)
# Store config to be used during entry setup
hass.data[DATA_NETATMO_CONFIG] = auth
return True
|
32,586 |
def main() -> None:
"""
main function
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
if isinstance(params, dict):
creds = params.get('credentials')
if isinstance(creds, dict):
api = creds.get('password')
auth_id = creds.get('identifier')
headers = {
'Authorization': f'{api}',
'x-xdr-auth-id': f'{auth_id}',
'Content-Type': 'application/json'
}
url_suffix = "/public_api/v1"
url = params['url']
add_sensitive_log_strs(api)
base_url = urljoin(url, url_suffix)
client = Client(
base_url=base_url,
verify=True,
headers=headers,
proxy=False,
auth=None)
commands = {
'asm-getexternalservices': getexternalservices_command,
'asm-getexternalservice': getexternalservice_command,
'asm-getexternalipaddressranges': getexternalipaddressranges_command,
'asm-getexternalipaddressrange': getexternalipaddressrange_command,
'asm-getassetsinternetexposure': getassetsinternetexposure_command,
'asm-getassetinternetexposure': getassetinternetexposure_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main() -> None:
"""
main function
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
if isinstance(params, dict):
creds = params.get('credentials')
if isinstance(creds, dict):
api = creds.get('password')
auth_id = creds.get('identifier')
headers = {
'Authorization': f'{api}',
'x-xdr-auth-id': f'{auth_id}',
'Content-Type': 'application/json'
}
url_suffix = "/public_api/v1"
url = params.get('url', '')
add_sensitive_log_strs(api)
base_url = urljoin(url, url_suffix)
client = Client(
base_url=base_url,
verify=True,
headers=headers,
proxy=False,
auth=None)
commands = {
'asm-getexternalservices': getexternalservices_command,
'asm-getexternalservice': getexternalservice_command,
'asm-getexternalipaddressranges': getexternalipaddressranges_command,
'asm-getexternalipaddressrange': getexternalipaddressrange_command,
'asm-getassetsinternetexposure': getassetsinternetexposure_command,
'asm-getassetinternetexposure': getassetinternetexposure_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
32,621 |
def disable_user(args: dict):
"""Disable user from supported integrations.
Will not return errors on un-supported commands unless there is no supported ones.
Args:
args[approve_action]: Must be yes in order for the command to work.
args[username]: The username to disable.
Returns:
The CommandResults of all the supported commands.
"""
if not argToBoolean(args.get('approve_action', False)):
return 'approve_action must be `yes`'
username = args.get('username')
if not username:
raise ValueError('username is not specified')
command_executors = create_commands(username)
return CommandRunner.run_commands_with_summary(command_executors)
|
def disable_user(args: dict):
"""Disable user from supported integrations.
Will not return errors on un-supported commands unless there is no supported ones.
args (dict):
args[approve_action]: Must be yes in order for the command to work.
args[username]: The username to disable.
Returns:
The CommandResults of all the supported commands.
"""
if not argToBoolean(args.get('approve_action', False)):
return 'approve_action must be `yes`'
username = args.get('username')
if not username:
raise ValueError('username is not specified')
command_executors = create_commands(username)
return CommandRunner.run_commands_with_summary(command_executors)
|
13,278 |
def find_castable_builtin_for_dtype(xp, dtype: Type) -> Type[Union[bool, int, float]]:
"""Returns builtin type which can have values that are castable to the given
dtype, according to :xp-ref:`type promotion rules <type_promotion.html>`.
``float`` is always returned for floating dtypes, as opposed to ``int``.
"""
stubs = []
try:
bool_dtype = xp.bool
if dtype == bool_dtype:
return bool
except AttributeError:
stubs.append("bool")
int_dtypes, int_stubs = partition_attributes_and_stubs(xp, ALL_INT_NAMES)
if dtype in int_dtypes:
return int
float_dtypes, float_stubs = partition_attributes_and_stubs(xp, FLOAT_NAMES)
if dtype in float_dtypes:
return float
stubs.extend(int_stubs)
stubs.extend(float_stubs)
if len(stubs) > 0:
warn_on_missing_dtypes(xp, stubs)
raise InvalidArgument(f"dtype {dtype} not recognised in {xp}")
|
def find_castable_builtin_for_dtype(xp, dtype: Type) -> Type[Union[bool, int, float]]:
"""Returns builtin type which can have values that are castable to the given
dtype, according to :xp-ref:`type promotion rules <type_promotion.html>`.
For floating dtypes we always return ``float``, even though ``int`` is also castable.
"""
stubs = []
try:
bool_dtype = xp.bool
if dtype == bool_dtype:
return bool
except AttributeError:
stubs.append("bool")
int_dtypes, int_stubs = partition_attributes_and_stubs(xp, ALL_INT_NAMES)
if dtype in int_dtypes:
return int
float_dtypes, float_stubs = partition_attributes_and_stubs(xp, FLOAT_NAMES)
if dtype in float_dtypes:
return float
stubs.extend(int_stubs)
stubs.extend(float_stubs)
if len(stubs) > 0:
warn_on_missing_dtypes(xp, stubs)
raise InvalidArgument(f"dtype {dtype} not recognised in {xp}")
|
23,608 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve is returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
    If the method is ``'newton'`` then the root-finding Newton-Raphson method
    is used. It should be safe for well-behaved IV curves, but the ``'brentq'``
    method is recommended for reliability.
    If the method is ``'brentq'`` then Brent's bisection search method is
    used, which guarantees convergence by bounding the voltage between zero
    and open-circuit voltage.
    If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
    is specified, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
    calculate points on the IV curve at diode voltages from zero to
    open-circuit voltage with a log spacing that gets closer as voltage
    increases. If the method is ``'lambertw'`` then the calculated points on
    the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
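A minimal, self-contained sketch of the bracketed root search described in the
``singlediode`` docstring above, using scipy.optimize.brentq directly rather
than pvlib. The parameter values and the [-IL, IL] bracket are illustrative
assumptions, not taken from the snippet.

import numpy as np
from scipy.optimize import brentq

# Illustrative single-diode parameters (assumed values).
IL, I0, Rs, Rsh, nNsVth = 6.0, 1e-9, 0.2, 300.0, 1.5

def residual(i, v):
    # Residual of I = IL - I0*(exp((V + I*Rs)/nNsVth) - 1) - (V + I*Rs)/Rsh at (v, i).
    return IL - I0 * (np.exp((v + i * Rs) / nNsVth) - 1.0) - (v + i * Rs) / Rsh - i

v = 0.5  # terminal voltage [V]
# For these parameters the residual changes sign on [-IL, IL], so Brent's
# method converges to the current at this voltage.
i = brentq(residual, -IL, IL, args=(v,))
print(f"I({v} V) = {i:.4f} A")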
44,307 |
def pow(base, z=1, lazy=True, do_queue=True, id=None):
"""Raise an Operator to a power.
Args:
base (~.operation.Operator): the operator to be raised to a power
z=1 (float): the exponent
Keyword Args:
lazy=True (bool): In lazy mode, all operations are wrapped in a ``Pow`` class
and handled later. If ``lazy=False``, operation-specific simplifications are first attempted.
Returns:
Operator
.. seealso:: :class:`~.Pow`, :meth:`~.Operator.pow`.
**Example**
>>> qml.pow(qml.PauliX(0), 0.5)
PauliX(wires=[0])**0.5
>>> qml.pow(qml.PauliX(0), 0.5, lazy=False)
SX(wires=[0])
>>> qml.pow(qml.PauliX(0), 0.1, lazy=False)
PauliX(wires=[0])**0.1
>>> qml.pow(qml.PauliX(0), 2, lazy=False)
Identity(wires=[0])
Lazy behavior can also be accessed via ``op ** z``.
"""
if lazy:
return Pow(base, z, do_queue=do_queue, id=id)
try:
pow_ops = base.pow(z)
except PowUndefinedError:
return Pow(base, z)
num_ops = len(pow_ops)
if num_ops == 0:
# needs to be identity (not prod of identities) so device knows to skip
pow_op = qml.Identity(base.wires[0])
elif num_ops == 1:
pow_op = pow_ops[0]
else:
pow_op = qml.prod(*pow_ops)
if do_queue:
QueuingContext.safe_update_info(base, owner=pow_op)
QueuingContext.safe_update_info(pow_op, owns=base)
return pow_op
|
def pow(base, z=1, lazy=True, do_queue=True, id=None):
"""Raise an Operator to a power.
Args:
base (~.operation.Operator): the operator to be raised to a power
z=1 (float): the exponent
Keyword Args:
lazy=True (bool): In lazy mode, all operations are wrapped in a ``Pow`` class
and handled later. If ``lazy=False``, operation-specific simplifications are first attempted.
Returns:
Operator
.. seealso:: :class:`~.Pow`, :meth:`~.Operator.pow`.
**Example**
>>> qml.pow(qml.PauliX(0), 0.5)
PauliX(wires=[0])**0.5
>>> qml.pow(qml.PauliX(0), 0.5, lazy=False)
SX(wires=[0])
>>> qml.pow(qml.PauliX(0), 0.1, lazy=False)
PauliX(wires=[0])**0.1
>>> qml.pow(qml.PauliX(0), 2, lazy=False)
Identity(wires=[0])
Lazy behavior can also be accessed via ``op ** z``.
"""
if lazy:
return Pow(base, z, do_queue=do_queue, id=id)
try:
pow_ops = base.pow(z)
except PowUndefinedError:
return Pow(base, z, do_queue=do_queue, id=id)
num_ops = len(pow_ops)
if num_ops == 0:
# needs to be identity (not prod of identities) so device knows to skip
pow_op = qml.Identity(base.wires[0])
elif num_ops == 1:
pow_op = pow_ops[0]
else:
pow_op = qml.prod(*pow_ops)
if do_queue:
QueuingContext.safe_update_info(base, owner=pow_op)
QueuingContext.safe_update_info(pow_op, owns=base)
return pow_op
|
31,312 |
def store_offset_in_context(offset: int) -> int:
integration_context = demisto.getIntegrationContext()
integration_context["offset"] = offset
demisto.setIntegrationContext(integration_context)
return offset
|
def store_offset_in_context(offset: int) -> int:
integration_context = get_integration_context()
integration_context["offset"] = offset
demisto.setIntegrationContext(integration_context)
return offset
|
40,343 |
def set_masks(model: torch.nn.Module, mask, edge_index, apply_sigmoid=True):
"""Apply mask to every graph layer in the model."""
loop_mask = edge_index[0] != edge_index[1]
# Loop over layers and set masks on MessagePassing layers
for module in model.modules():
if isinstance(module, MessagePassing):
module.__explain__ = True
module.__edge_mask__ = mask
module.__loop_mask__ = loop_mask
module.__apply_sigmoid__ = apply_sigmoid
|
def set_masks(model: torch.nn.Module, mask: Tensor, edge_index: Tensor, apply_sigmoid: bool=True):
"""Apply mask to every graph layer in the model."""
loop_mask = edge_index[0] != edge_index[1]
# Loop over layers and set masks on MessagePassing layers
for module in model.modules():
if isinstance(module, MessagePassing):
module.__explain__ = True
module.__edge_mask__ = mask
module.__loop_mask__ = loop_mask
module.__apply_sigmoid__ = apply_sigmoid
|
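A hedged companion to the ``set_masks`` pair above: resetting the same
attributes on every ``MessagePassing`` layer undoes the masking. The attribute
names are taken from the snippet itself; this is a sketch, not necessarily the
helper any particular torch_geometric version ships.

import torch
from torch_geometric.nn import MessagePassing

def clear_masks(model: torch.nn.Module):
    """Remove the masks applied by `set_masks` from all MessagePassing layers."""
    for module in model.modules():
        if isinstance(module, MessagePassing):
            module.__explain__ = False
            module.__edge_mask__ = None
            module.__loop_mask__ = None
            module.__apply_sigmoid__ = True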
36,133 |
def set_log_level(_, __, value):
"""Set the log level for all loggers.
Note that we cannot use the most obvious approach of directly setting the level on the ``AIIDA_LOGGER``. The reason
is that after this callback is finished, the :meth:`aiida.common.log.configure_logging` method can be called again,
for example when the database backend is loaded, and this will undo this change. So instead, we change the value of
the `aiida.common.log.CLI_LOG_LEVEL` constant. When the logging is reconfigured, that value is no longer ``None``
which will ensure that the ``cli`` handler is configured for all handlers with the level of ``CLI_LOG_LEVEL``. This
    approach tightly couples the generic :mod:`aiida.common.log` module to the :mod:`aiida.cmdline` module, which is not
the cleanest, but given that other module code can undo the logging configuration by calling that method, there
seems no easy way around this approach.
"""
from aiida.common import log
try:
log_level = value.upper()
except AttributeError:
raise click.BadParameter(f'`{value}` is not a string.')
if log_level not in LOG_LEVELS:
raise click.BadParameter(f'`{log_level}` is not a valid log level.')
log.CLI_LOG_LEVEL = log_level
# Make sure the logging is configured, even if it may be undone in the future by another call to this method.
configure_logging()
return log_level
|
def set_log_level(_, __, value):
"""Fix the log level for all loggers from the cli.
Note that we cannot use the most obvious approach of directly setting the level on the ``AIIDA_LOGGER``. The reason
is that after this callback is finished, the :meth:`aiida.common.log.configure_logging` method can be called again,
for example when the database backend is loaded, and this will undo this change. So instead, we change the value of
the `aiida.common.log.CLI_LOG_LEVEL` constant. When the logging is reconfigured, that value is no longer ``None``
which will ensure that the ``cli`` handler is configured for all handlers with the level of ``CLI_LOG_LEVEL``. This
    approach tightly couples the generic :mod:`aiida.common.log` module to the :mod:`aiida.cmdline` module, which is not
the cleanest, but given that other module code can undo the logging configuration by calling that method, there
seems no easy way around this approach.
"""
from aiida.common import log
try:
log_level = value.upper()
except AttributeError:
raise click.BadParameter(f'`{value}` is not a string.')
if log_level not in LOG_LEVELS:
raise click.BadParameter(f'`{log_level}` is not a valid log level.')
log.CLI_LOG_LEVEL = log_level
# Make sure the logging is configured, even if it may be undone in the future by another call to this method.
configure_logging()
return log_level
|
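The docstring above explains why the callback mutates ``CLI_LOG_LEVEL`` instead
of touching the logger directly. As context for how such a callback is wired
up, here is a standalone sketch of the same eager-callback pattern using only
``click`` and the stdlib ``logging`` module; the option name and the use of
``logging.basicConfig`` are illustrative assumptions, not aiida-core's actual
CLI wiring.

import logging
import click

_LEVELS = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')

def _set_level(ctx, param, value):
    # Validate and apply the level before other options are processed.
    level = value.upper()
    if level not in _LEVELS:
        raise click.BadParameter(f'`{level}` is not a valid log level.')
    logging.basicConfig(level=level)
    return level

@click.command()
@click.option('--log-level', default='INFO', is_eager=True,
              expose_value=False, callback=_set_level,
              help='Minimum severity of messages to show.')
def cli():
    logging.getLogger(__name__).warning('logging configured')

if __name__ == '__main__':
    cli()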
57,798 |
def main():
"""
Executes an integration command
"""
LOG('Command being called is ' + demisto.command())
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
rest_client = RestClient(
base_url=BASE_URL,
verify=VERIFY_CERT,
proxy=True
)
try:
if demisto.command() == 'test-module':
test_module(rest_client)
demisto.results('ok')
elif demisto.command() == 'fetch-incidents':
# get all tenant ids
next_run, incidents = fetch_incidents(rest_client, demisto.getLastRun())
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'respond-close-incident':
return_outputs(close_incident_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-assign-user':
return_outputs(assign_user_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-remove-user':
return_outputs(remove_user_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-get-incident':
return_outputs(get_incident_command(rest_client, demisto.args()))
elif demisto.command() == 'update-remote-system':
demisto.debug('in update-remote-system')
return_results(update_remote_system_command(rest_client, demisto.args()))
elif demisto.command() == 'get-mapping-fields':
demisto.debug('get-mapping-fields called')
return_results(get_mapping_fields_command())
elif demisto.command() == 'get-remote-data':
return_results(get_remote_data_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-get-escalations':
return_results(get_escalations_command(rest_client, demisto.args()))
except Exception as err:
return_error(str(err))
|
def main():
"""
Executes an integration command
"""
LOG('Command being called is ' + demisto.command())
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
rest_client = RestClient(
base_url=BASE_URL,
verify=VERIFY_CERT,
proxy=True
)
try:
if demisto.command() == 'test-module':
test_module(rest_client)
demisto.results('ok')
elif demisto.command() == 'fetch-incidents':
# get all tenant ids
next_run, incidents = fetch_incidents(rest_client, demisto.getLastRun())
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'respond-close-incident':
return_outputs(close_incident_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-assign-user':
return_outputs(assign_user_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-remove-user':
return_outputs(remove_user_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-get-incident':
return_outputs(get_incident_command(rest_client, demisto.args()))
elif demisto.command() == 'update-remote-system':
demisto.debug('in update-remote-system')
return_results(update_remote_system_command(rest_client, demisto.args()))
elif demisto.command() == 'get-mapping-fields':
demisto.debug('get-mapping-fields called')
return_results(get_mapping_fields_command())
elif demisto.command() == 'get-remote-data':
return_results(get_remote_data_command(rest_client, demisto.args()))
elif demisto.command() == 'respond-get-escalations':
return_results(get_escalations_command(rest_client, demisto.args()))
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(err)}')
|
31,027 |
def panorama_zone_lookup_command():
"""
Gets the outgoing interface from the Palo Alto Firewall route table, and the list of interfaces
comparing the two
"""
dest_ip = demisto.args().get("dest_ip")
vr = demisto.args().get("virtual_router", None)
route = panorama_route_lookup(dest_ip, vr)
if not route:
demisto.results(f"Could find a matching route to {dest_ip}.")
return
interface = route["interface"]
interfaces = panorama_get_interfaces()
r = {}
if "ifnet" in interfaces["response"]["result"]:
for entry in interfaces["response"]["result"]["ifnet"]["entry"]:
if entry["name"] == interface:
if "zone" in entry:
r = {**entry, **route}
if r:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': r,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': f'The IP {dest_ip} is in zone {r["zone"]}',
'EntryContext': {"Panorama.ZoneLookup(val.Name == obj.Name)": r} # add key -> deleted: true
})
return r
else:
demisto.results(f"Could not map {dest_ip} to zone.")
return {}
|
def panorama_zone_lookup_command():
"""
Gets the outgoing interface from the Palo Alto Firewall route table, and the list of interfaces
comparing the two
"""
dest_ip = demisto.args().get("dest_ip")
vr = demisto.args().get("virtual_router", None)
route = panorama_route_lookup(dest_ip, vr)
if not route:
demisto.results(f"Could find a matching route to {dest_ip}.")
return
interface = route["interface"]
interfaces = panorama_get_interfaces()
r = {}
if "ifnet" in interfaces["response"]["result"]:
for entry in interfaces["response"]["result"]["ifnet"]["entry"]:
if entry.get("name") == interface:
if "zone" in entry:
r = {**entry, **route}
if r:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': r,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': f'The IP {dest_ip} is in zone {r["zone"]}',
'EntryContext': {"Panorama.ZoneLookup(val.Name == obj.Name)": r} # add key -> deleted: true
})
return r
else:
demisto.results(f"Could not map {dest_ip} to zone.")
return {}
|
57,734 |
def get_user_command(client, args, mapper_out):
try:
user_profile = args.get("user-profile")
iam_user_profile = IAMUserProfile(user_profile=user_profile)
salesforce_user = iam_user_profile.map_object(mapper_name=mapper_out)
email = salesforce_user.get('email')
if not email:
raise Exception('You must provide a valid email')
user_id = get_user_if_by_mail(client, email)
if not user_id:
iam_user_profile.set_result(success=False,
error_message="User was not found",
error_code=404,
action=IAMActions.GET_USER,
)
else:
res = client.get_user_profile(user_id)
res_json = res.json()
if res.status_code == 200:
iam_user_profile.set_result(success=True,
iden=res_json.get('Id'),
email=res_json.get('Email'),
username=res_json.get('Username'),
action=IAMActions.GET_USER,
details=res_json,
active=res_json.get('IsActive'))
else:
iam_user_profile.set_result(success=False,
email=email,
error_code=res.status_code,
error_message=res_json.get('message'),
action=IAMActions.GET_USER,
details=res_json)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.GET_USER
)
return iam_user_profile
|
def get_user_command(client, args, mapper_out):
try:
user_profile = args.get("user-profile")
iam_user_profile = IAMUserProfile(user_profile=user_profile)
salesforce_user = iam_user_profile.map_object(mapper_name=mapper_out)
email = salesforce_user.get('email')
if not email:
raise Exception('You must provide a valid email')
user_id = get_user_if_by_mail(client, email)
if not user_id:
error_code, error_message = IAMErrors.USER_DOES_NOT_EXIST
iam_user_profile.set_result(success=False,
error_message=error_message,
error_code=error_code,
action=IAMActions.GET_USER,
)
else:
res = client.get_user_profile(user_id)
res_json = res.json()
if res.status_code == 200:
iam_user_profile.set_result(success=True,
iden=res_json.get('Id'),
email=res_json.get('Email'),
username=res_json.get('Username'),
action=IAMActions.GET_USER,
details=res_json,
active=res_json.get('IsActive'))
else:
iam_user_profile.set_result(success=False,
email=email,
error_code=res.status_code,
error_message=res_json.get('message'),
action=IAMActions.GET_USER,
details=res_json)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.GET_USER
)
return iam_user_profile
|
35,793 |
def list_models(module: Optional[ModuleType] = None) -> List[str]:
"""
Returns a list with the names of registered models.
Args:
module (ModuleType, optional): The module from which we want to extract the available models.
Returns:
models (list): A list with the names of available models.
"""
models = [
k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__.rsplit(".", 1)[0] == module.__name__
]
return sorted(models)
|
def list_models(module: Optional[ModuleType] = None) -> List[str]:
"""
Returns a list with the names of registered models.
Args:
module (ModuleType, optional): The module from which we want to extract the available models.
Returns:
models (list): A list with the names of available models.
"""
models = [
k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__ is module
]
return sorted(models)
|
34,198 |
def _guess_format(filename: Text) -> Text:
logger.warning(
"Using '_guess_format' is deprecated since Rasa 1.1.5. "
"Please us 'guess_format' instead."
)
return guess_format(filename)
|
def _guess_format(filename: Text) -> Text:
logger.warning(
"Using '_guess_format' is deprecated since Rasa 1.1.5. "
"Please use 'guess_format()' instead."
)
return guess_format(filename)
|
25,183 |
def infer_property(
node: nodes.Call, context: Optional[InferenceContext] = None
) -> objects.Property:
"""Understand `property` class
This only infers the output of `property`
call, not the arguments themselves.
"""
if len(node.args) < 1:
# Invalid property call.
raise UseInferenceDefault
getter = node.args[0]
try:
inferred = next(getter.infer(context=context))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if not isinstance(inferred, (nodes.FunctionDef, nodes.Lambda)):
raise UseInferenceDefault
prop_func = objects.Property(
function=inferred,
name=inferred.name,
doc=getattr(inferred, "doc_node.value", None),
lineno=node.lineno,
parent=node,
col_offset=node.col_offset,
)
prop_func.postinit(
body=[],
args=inferred.args,
doc_node=getattr(inferred, "doc_node", None),
)
return prop_func
|
def infer_property(
node: nodes.Call, context: Optional[InferenceContext] = None
) -> objects.Property:
"""Understand `property` class
This only infers the output of `property`
call, not the arguments themselves.
"""
if len(node.args) < 1:
# Invalid property call.
raise UseInferenceDefault
getter = node.args[0]
try:
inferred = next(getter.infer(context=context))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if not isinstance(inferred, (nodes.FunctionDef, nodes.Lambda)):
raise UseInferenceDefault
prop_func = objects.Property(
function=inferred,
name=inferred.name,
doc=getattr(inferred, "doc_node.value", None),
lineno=node.lineno,
parent=node,
col_offset=node.col_offset,
)
prop_func.postinit(
body=[],
args=inferred.args,
doc_node=inferred.doc_node
)
return prop_func
|
49,075 |
def FlorySchulz(name, p):
r"""
Create a discrete random variable with a FlorySchulz distribution.
The density of the FlorySchulz distribution is given by
.. math::
f(k) := (p^2) k (1 - p)^{k-1}
Parameters
==========
p: A probability between 0 and 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, E, variance, FlorySchulz
>>> from sympy import Symbol, S
>>> p = S.One / 5
>>> z = Symbol("z")
>>> X = FlorySchulz("x", p)
>>> density(X)(z)
(4/5)**(z - 1)*z/25
>>> E(X)
9
>>> variance(X)
40
References
==========
https://en.wikipedia.org/wiki/Flory%E2%80%93Schulz_distribution
"""
return rv(name, FlorySchulzDistribution, p)
|
def FlorySchulz(name, p):
r"""
Create a discrete random variable with a FlorySchulz distribution.
The density of the FlorySchulz distribution is given by
.. math::
f(k) := (p^2) k (1 - p)^{k-1}
Parameters
==========
p
A real number between 0 and 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, E, variance, FlorySchulz
>>> from sympy import Symbol, S
>>> p = S.One / 5
>>> z = Symbol("z")
>>> X = FlorySchulz("x", p)
>>> density(X)(z)
(4/5)**(z - 1)*z/25
>>> E(X)
9
>>> variance(X)
40
References
==========
https://en.wikipedia.org/wiki/Flory%E2%80%93Schulz_distribution
"""
return rv(name, FlorySchulzDistribution, p)
|
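The doctest values in the ``FlorySchulz`` docstring can be checked without
``sympy.stats`` by summing the stated density directly: in closed form the mean
is 2/p - 1 and the variance 2*(1 - p)/p**2, which give 9 and 40 for p = 1/5.
A short verification sketch:

from sympy import S, Symbol, oo, simplify, summation

k = Symbol('k', positive=True, integer=True)
p = S.One / 5
density = p**2 * k * (1 - p)**(k - 1)   # f(k) from the docstring

mean = summation(k * density, (k, 1, oo))
second_moment = summation(k**2 * density, (k, 1, oo))
variance = simplify(second_moment - mean**2)
print(mean, variance)   # expected: 9 40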
45,648 |
def layout():
return html.Div(id='alignment-body', children=[
html.Div([
html.Div(id='alignment-control-tabs', children=[
dcc.Tabs(
id='alignment-tabs',
children=[
dcc.Tab(
label='About',
value='alignment-tab-about',
children=html.Div(className='alignment-tab', children=[
html.H4(
"What is Alignment Viewer?"
),
html.P(
"""
The Alignment Viewer (MSA) component is used to align
multiple genomic or proteomic sequences from a FASTA or
Clustal file. Among its extensive set of features,
the multiple sequence alignment viewer can display
multiple subplots showing gap and conservation info,
alongside industry standard colorscale support and
consensus sequence. No matter what size your alignment
is, Alignment Viewer is able to display your genes or
proteins snappily thanks to the underlying WebGL
architecture powering the component. You can quickly
scroll through your long sequence with a slider or a
heatmap overview.
"""
),
html.P(
"""
Note that the AlignmentChart only returns a chart of
the sequence, while AlignmentViewer has integrated
controls for colorscale, heatmaps, and subplots allowing
the user to interactively control their sequences.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-alignment-viewer
"""
),
])
),
dcc.Tab(
label='Data',
value='alignment-tab-select',
children=html.Div(className='alignment-tab', children=[
html.H5(
"Select preloaded dataset"
),
dcc.Dropdown(
id='alignment-dropdown',
options=[
{
'label': 'Sample.fasta',
'value': 'dataset1'
},
{
'label': 'P53.fasta naive',
'value': 'dataset2'
},
{
'label': 'P53.fasta aligned (ClustalW)',
'value': 'dataset3'
},
],
value='dataset3',
),
html.Br(),
html.H5(
"Upload your own dataset"
),
html.Div([
html.A(
html.Button(
"Download sample data",
className='alignment-button',
),
href="/assets/sample_data/p53_clustalo.fasta",
download="p53_clustalo.fasta",
)
]),
html.Div(id='alignment-file-upload-container', children=[dcc.Upload(
id='alignment-file-upload',
className='alignment-upload',
children=html.Div([
"Drag and drop FASTA files or select files."
]),
)]),
])
),
dcc.Tab(
label='Interactions',
value='alignment-tab-select2',
children=html.Div(className='alignment-tab', children=[
html.H5(
"Hover/Click/Event Data"
),
html.P('Hover or click on data to see it here.'),
html.Div(
id='alignment-events'
)
]),
),
dcc.Tab(
label='Graph',
value='alignment-tab-customize',
children=html.Div(className='alignment-tab', children=[
html.Div([
html.H3('General', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children="Colorscale"),
dcc.Dropdown(
id='alignment-colorscale-dropdown',
className='alignment-settings-dropdown',
options=COLORSCALES_DICT,
value='clustal2',
),
html.P("Choose color theme of the viewer."),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Overview'),
dcc.Dropdown(
id='alignment-overview-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': 'Heatmap', 'value': 'heatmap'},
{'label': 'Slider', 'value': 'slider'},
{'label': 'None', 'value': 'none'},
],
value='heatmap',
),
html.P("Show slider, heatmap or no overview."),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Consensus'),
dcc.RadioItems(
id='alignment-showconsensus-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Toggle the consensus (most frequent) sequence.'
),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Text size'),
dcc.Slider(
className='alignment-slider',
id='alignment-textsize-slider',
value=10,
min=8,
max=12,
step=1,
marks={
'8': 8,
'9': 9,
'10': 10,
'11': 11,
'12': 12,
},
),
html.P(
'Adjust the font size (in px) of viewer text.'
),
],
),
]),
html.Hr(),
html.Div([
html.H3('Conservation', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Barplot'),
dcc.RadioItems(
id='alignment-showconservation-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P('Show or hide the conservation barplot.')
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Colorscale'),
dcc.Dropdown(
id='alignment-conservationcolorscale-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': col_code, 'value': col_code}
for col_code in CONSERVATION_COLORS_OPT
],
value='Viridis',
),
html.P('Change the colorscale for the '
'conservation barplot.'),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Method'),
dcc.Dropdown(
id='alignment-conservationmethod-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': 'Entropy',
'value': 'entropy'},
{'label': 'Conservation',
'value': 'conservation'},
],
value='entropy',
),
html.P("Conservation (MLE) or normalized entropy."),
],
),
]),
html.Hr(),
html.Div([
html.H3('Conservation gap',
className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Colorscale'),
dcc.RadioItems(
id='alignment-correctgap-radio',
className='alignment-radio',
options=[
{'label': 'Yes', 'value': True},
{'label': 'No', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P("Lowers conservation of high gap sequences.")
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Gap'),
dcc.RadioItems(
id='alignment-showgap-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P("Show/hide the gap barplot.")
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Color'),
dcc.Dropdown(
id='alignment-gapcolor-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': col_code, 'value': col_code}
for col_code in GAP_COLORS_OPT
],
value='grey',
),
html.P('Set the color of the traces '
'that represent the gap.')
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Group'),
dcc.RadioItems(
id='alignment-groupbars-radio',
className='alignment-radio',
options=[
{'label': 'Yes', 'value': True},
{'label': 'No', 'value': False},
],
value=False,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P('Group gap and conservation bars.')
],
),
# Conservation colorscale
# Gap color
]),
html.Hr(),
html.Div([
html.H3('Layout', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Labels'),
dcc.RadioItems(
id='alignment-showlabel-radio',
className='alignment-radio',
options=[
{'label': 'Show ', 'value': True},
{'label': 'Hide ', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Show track labels on the left.'
),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='IDs'),
dcc.RadioItems(
id='alignment-showid-radio',
className='alignment-radio',
options=[
{'label': 'Show ', 'value': True},
{'label': 'Hide ', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Show track IDs on the left.'
)
],
),
]),
]),
),
],
),
]),
]),
html.Div([
dash_bio.AlignmentChart(
id='alignment-chart',
height=725,
data=dataset3,
),
]),
dcc.Store(id='alignment-data-store'),
])
|
def layout():
return html.Div(id='alignment-body', children=[
html.Div([
html.Div(id='alignment-control-tabs', children=[
dcc.Tabs(
id='alignment-tabs',
children=[
dcc.Tab(
label='About',
value='alignment-tab-about',
children=html.Div(className='alignment-tab', children=[
html.H4(
"What is Alignment Viewer?"
),
html.P(
"""
The Alignment Viewer (MSA) component is used to align
multiple genomic or proteomic sequences from a FASTA or
Clustal file. Among its extensive set of features,
the multiple sequence alignment viewer can display
multiple subplots showing gap and conservation info,
alongside industry standard colorscale support and
consensus sequence. No matter what size your alignment
is, Alignment Viewer is able to display your genes or
proteins snappily thanks to the underlying WebGL
architecture powering the component. You can quickly
scroll through your long sequence with a slider or a
heatmap overview.
"""
),
html.P(
"""
Note that the AlignmentChart only returns a chart of
the sequence, while AlignmentViewer has integrated
controls for colorscale, heatmaps, and subplots allowing
you to control interactively your sequences.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-alignment-viewer
"""
),
])
),
dcc.Tab(
label='Data',
value='alignment-tab-select',
children=html.Div(className='alignment-tab', children=[
html.H5(
"Select preloaded dataset"
),
dcc.Dropdown(
id='alignment-dropdown',
options=[
{
'label': 'Sample.fasta',
'value': 'dataset1'
},
{
'label': 'P53.fasta naive',
'value': 'dataset2'
},
{
'label': 'P53.fasta aligned (ClustalW)',
'value': 'dataset3'
},
],
value='dataset3',
),
html.Br(),
html.H5(
"Upload your own dataset"
),
html.Div([
html.A(
html.Button(
"Download sample data",
className='alignment-button',
),
href="/assets/sample_data/p53_clustalo.fasta",
download="p53_clustalo.fasta",
)
]),
html.Div(id='alignment-file-upload-container', children=[dcc.Upload(
id='alignment-file-upload',
className='alignment-upload',
children=html.Div([
"Drag and drop FASTA files or select files."
]),
)]),
])
),
dcc.Tab(
label='Interactions',
value='alignment-tab-select2',
children=html.Div(className='alignment-tab', children=[
html.H5(
"Hover/Click/Event Data"
),
html.P('Hover or click on data to see it here.'),
html.Div(
id='alignment-events'
)
]),
),
dcc.Tab(
label='Graph',
value='alignment-tab-customize',
children=html.Div(className='alignment-tab', children=[
html.Div([
html.H3('General', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children="Colorscale"),
dcc.Dropdown(
id='alignment-colorscale-dropdown',
className='alignment-settings-dropdown',
options=COLORSCALES_DICT,
value='clustal2',
),
html.P("Choose color theme of the viewer."),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Overview'),
dcc.Dropdown(
id='alignment-overview-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': 'Heatmap', 'value': 'heatmap'},
{'label': 'Slider', 'value': 'slider'},
{'label': 'None', 'value': 'none'},
],
value='heatmap',
),
html.P("Show slider, heatmap or no overview."),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Consensus'),
dcc.RadioItems(
id='alignment-showconsensus-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Toggle the consensus (most frequent) sequence.'
),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Text size'),
dcc.Slider(
className='alignment-slider',
id='alignment-textsize-slider',
value=10,
min=8,
max=12,
step=1,
marks={
'8': 8,
'9': 9,
'10': 10,
'11': 11,
'12': 12,
},
),
html.P(
'Adjust the font size (in px) of viewer text.'
),
],
),
]),
html.Hr(),
html.Div([
html.H3('Conservation', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Barplot'),
dcc.RadioItems(
id='alignment-showconservation-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P('Show or hide the conservation barplot.')
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Colorscale'),
dcc.Dropdown(
id='alignment-conservationcolorscale-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': col_code, 'value': col_code}
for col_code in CONSERVATION_COLORS_OPT
],
value='Viridis',
),
html.P('Change the colorscale for the '
'conservation barplot.'),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Method'),
dcc.Dropdown(
id='alignment-conservationmethod-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': 'Entropy',
'value': 'entropy'},
{'label': 'Conservation',
'value': 'conservation'},
],
value='entropy',
),
html.P("Conservation (MLE) or normalized entropy."),
],
),
]),
html.Hr(),
html.Div([
html.H3('Conservation gap',
className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Colorscale'),
dcc.RadioItems(
id='alignment-correctgap-radio',
className='alignment-radio',
options=[
{'label': 'Yes', 'value': True},
{'label': 'No', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P("Lowers conservation of high gap sequences.")
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Gap'),
dcc.RadioItems(
id='alignment-showgap-radio',
className='alignment-radio',
options=[
{'label': 'Show', 'value': True},
{'label': 'Hide', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P("Show/hide the gap barplot.")
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Color'),
dcc.Dropdown(
id='alignment-gapcolor-dropdown',
className='alignment-settings-dropdown',
options=[
{'label': col_code, 'value': col_code}
for col_code in GAP_COLORS_OPT
],
value='grey',
),
html.P('Set the color of the traces '
'that represent the gap.')
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Group'),
dcc.RadioItems(
id='alignment-groupbars-radio',
className='alignment-radio',
options=[
{'label': 'Yes', 'value': True},
{'label': 'No', 'value': False},
],
value=False,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P('Group gap and conservation bars.')
],
),
# Conservation colorscale
# Gap color
]),
html.Hr(),
html.Div([
html.H3('Layout', className='alignment-settings-section'),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='Labels'),
dcc.RadioItems(
id='alignment-showlabel-radio',
className='alignment-radio',
options=[
{'label': 'Show ', 'value': True},
{'label': 'Hide ', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Show track labels on the left.'
),
],
),
html.Div(
className='alignment-settings',
children=[
html.Div(className='alignment-setting-name',
children='IDs'),
dcc.RadioItems(
id='alignment-showid-radio',
className='alignment-radio',
options=[
{'label': 'Show ', 'value': True},
{'label': 'Hide ', 'value': False},
],
value=True,
labelStyle={
'display': 'inline-block',
'margin-right': '8px',
},
),
html.P(
'Show track IDs on the left.'
)
],
),
]),
]),
),
],
),
]),
]),
html.Div([
dash_bio.AlignmentChart(
id='alignment-chart',
height=725,
data=dataset3,
),
]),
dcc.Store(id='alignment-data-store'),
])
|
8,813 |
def _parse_chanmodes(value):
items = value.split(',')
if len(items) < 4:
raise ValueError('Not enough channel types to unpack from %r.' % value)
# add extra channel type's modes to their own tuple
# result in (A, B, C, D, (E, F, G, H, ..., Z))
# where A, B, C, D = result[:4]
# and extras = result[4]
return tuple(items[:4]) + (tuple(items[4:]),)
|
def _parse_chanmodes(value):
items = value.split(',')
if len(items) < 4:
raise ValueError('Not enough channel types to unpack from %r.' % value)
# add extra channel mode types to their own tuple
# result in (A, B, C, D, (E, F, G, H, ..., Z))
# where A, B, C, D = result[:4]
# and extras = result[4]
return tuple(items[:4]) + (tuple(items[4:]),)
|
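A short usage illustration for ``_parse_chanmodes`` above (both versions behave
identically here); the CHANMODES strings are made-up examples in the usual
ISUPPORT format:

# With exactly four comma-separated groups, the "extras" tuple is empty.
print(_parse_chanmodes('beI,k,l,imnpst'))
# -> ('beI', 'k', 'l', 'imnpst', ())

# Additional groups are collected into the trailing tuple.
print(_parse_chanmodes('beI,k,l,imnpst,E,F'))
# -> ('beI', 'k', 'l', 'imnpst', ('E', 'F'))

# Fewer than four groups raises ValueError.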
13,591 |
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversamplingparameter. Number of extra columns of the projectionmatrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
        |VectorArray| containing an approximate optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversampling parameter. Number of extra columns of the sample matrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
        |VectorArray| containing an approximate optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
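For readers without pyMOR, the same randomized QB idea can be sketched with
plain NumPy arrays. This is a simplified Halko-style range finder, not the
implementation above; the truncation to ``target_rank`` mirrors the slicing in
the snippet, and all names are illustrative.

import numpy as np

def rand_qb_numpy(A, target_rank, oversampling=5, power_iterations=0, seed=None):
    """Return Q with orthonormal columns and B = Q.T @ A so that A ~= Q @ B."""
    rng = np.random.default_rng(seed)
    n = A.shape[1]
    Omega = rng.standard_normal((n, target_rank + oversampling))  # random test matrix
    Q, _ = np.linalg.qr(A @ Omega)                                # sample the range of A
    for _ in range(power_iterations):                             # optional power iterations
        Z, _ = np.linalg.qr(A.T @ Q)
        Q, _ = np.linalg.qr(A @ Z)
    Q = Q[:, :target_rank]
    B = Q.T @ A
    return Q, B

# Quick check on a synthetic rank-15 matrix: the relative error should be tiny.
rng = np.random.default_rng(0)
A = rng.standard_normal((200, 15)) @ rng.standard_normal((15, 80))
Q, B = rand_qb_numpy(A, target_rank=15, power_iterations=1)
print(np.linalg.norm(A - Q @ B) / np.linalg.norm(A))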
558 |
def sync_usercases_if_applicable(user, spawn_task):
if settings.UNIT_TESTING and not user.project:
return
if (user.project.call_center_config.enabled or user.project.usercase_enabled):
if spawn_task:
sync_usercases_task.delay(user._id, user.project)
else:
sync_usercases_task(user._id, user.project)
|
def sync_usercases_if_applicable(user, spawn_task):
if settings.UNIT_TESTING and not user.project:
return
if (user.project.call_center_config.enabled or user.project.usercase_enabled):
if spawn_task:
sync_usercases_task.delay(user._id, user.project)
else:
sync_usercases(user, user.project)
|
33,954 |
def DistributedTrainableCreator(
func: Callable,
use_gpu: bool = False,
num_hosts: Optional[int] = None,
num_workers: Optional[int] = None,
num_cpus_per_worker: int = 1,
timeout_s: int = 30,
replicate_pem: bool = False,
) -> Type[_HorovodTrainable]:
"""Converts Horovod functions to be executable by Tune.
Requires horovod > 0.19 to work.
This function wraps and sets the resources for a given Horovod
function to be used with Tune. It generates a Horovod Trainable (trial)
which can itself be a distributed training job. One basic assumption of
this implementation is that all sub-workers
of a trial will be placed evenly across different machines.
It is recommended that if `num_hosts` per trial > 1, you set
num_workers == the size (or number of GPUs) of a single host.
If num_hosts == 1, then you can set num_workers to be <=
the size (number of GPUs) of a single host.
This above assumption can be relaxed - please file a feature request
on Github to inform the maintainers.
Another assumption is that this API requires gloo as the underlying
communication primitive. You will need to install Horovod with
`HOROVOD_WITH_GLOO` enabled.
*Fault Tolerance:* The trial workers themselves are not fault tolerant.
When a host of a trial fails, all workers of a trial are expected to
die, and the trial is expected to restart. This currently does not
support function checkpointing.
Args:
func (Callable[[dict], None]): A training function that takes in
a config dict for hyperparameters and should initialize
horovod via horovod.init.
        use_gpu (bool): Whether to allocate a GPU per worker.
num_cpus_per_worker (int): Number of CPUs to request
from Ray per worker.
num_hosts (int): Number of hosts that each trial is expected
to use.
num_workers (int): Number of workers to start on each host.
timeout_s (int): Seconds for Horovod rendezvous to timeout.
replicate_pem (bool): THIS MAY BE INSECURE. If true, this will
replicate the underlying Ray cluster ssh key across all hosts.
This may be useful if using the Ray Autoscaler.
Returns:
Trainable class that can be passed into `tune.run`.
Example:
.. code-block:: python
def train(config):
horovod.init()
horovod.allreduce()
from ray.tune.integration.horovod import DistributedTrainableCreator
trainable_cls = DistributedTrainableCreator(
train, num_hosts=1, num_workers=2, use_gpu=True)
tune.run(trainable_cls)
.. versionadded:: 1.0.0
"""
ssh_identity_file = None
sshkeystr = None
if replicate_pem:
from ray.tune.cluster_info import get_ssh_key
ssh_identity_file = get_ssh_key()
if os.path.exists(ssh_identity_file):
# For now, we assume that you're on a Ray cluster.
with open(ssh_identity_file) as f:
sshkeystr = f.read()
class WrappedHorovodTrainable(_HorovodTrainable):
_function = func
_num_hosts = num_hosts
_num_workers = num_workers
_num_cpus_per_worker = num_cpus_per_worker
_use_gpu = use_gpu
_ssh_identity_file = ssh_identity_file
_ssh_str = sshkeystr
_timeout_s = timeout_s
@classmethod
def default_resource_request(cls, config: Dict):
return PlacementGroupFactory(
[{}]
+ [{"CPU": cls._num_cpus_per_worker, "GPU": int(use_gpu)}]
* (num_workers)
)
return WrappedHorovodTrainable
|
def DistributedTrainableCreator(
func: Callable,
use_gpu: bool = False,
num_hosts: Optional[int] = None,
num_workers: int = 1,
num_cpus_per_worker: int = 1,
timeout_s: int = 30,
replicate_pem: bool = False,
) -> Type[_HorovodTrainable]:
"""Converts Horovod functions to be executable by Tune.
Requires horovod > 0.19 to work.
This function wraps and sets the resources for a given Horovod
function to be used with Tune. It generates a Horovod Trainable (trial)
which can itself be a distributed training job. One basic assumption of
this implementation is that all sub-workers
of a trial will be placed evenly across different machines.
It is recommended that if `num_hosts` per trial > 1, you set
num_workers == the size (or number of GPUs) of a single host.
If num_hosts == 1, then you can set num_workers to be <=
the size (number of GPUs) of a single host.
This above assumption can be relaxed - please file a feature request
on Github to inform the maintainers.
Another assumption is that this API requires gloo as the underlying
communication primitive. You will need to install Horovod with
`HOROVOD_WITH_GLOO` enabled.
*Fault Tolerance:* The trial workers themselves are not fault tolerant.
When a host of a trial fails, all workers of a trial are expected to
die, and the trial is expected to restart. This currently does not
support function checkpointing.
Args:
func (Callable[[dict], None]): A training function that takes in
a config dict for hyperparameters and should initialize
horovod via horovod.init.
        use_gpu (bool): Whether to allocate a GPU per worker.
num_cpus_per_worker (int): Number of CPUs to request
from Ray per worker.
num_hosts (int): Number of hosts that each trial is expected
to use.
num_workers (int): Number of workers to start on each host.
timeout_s (int): Seconds for Horovod rendezvous to timeout.
replicate_pem (bool): THIS MAY BE INSECURE. If true, this will
replicate the underlying Ray cluster ssh key across all hosts.
This may be useful if using the Ray Autoscaler.
Returns:
Trainable class that can be passed into `tune.run`.
Example:
.. code-block:: python
def train(config):
horovod.init()
horovod.allreduce()
from ray.tune.integration.horovod import DistributedTrainableCreator
trainable_cls = DistributedTrainableCreator(
train, num_hosts=1, num_workers=2, use_gpu=True)
tune.run(trainable_cls)
.. versionadded:: 1.0.0
"""
ssh_identity_file = None
sshkeystr = None
if replicate_pem:
from ray.tune.cluster_info import get_ssh_key
ssh_identity_file = get_ssh_key()
if os.path.exists(ssh_identity_file):
# For now, we assume that you're on a Ray cluster.
with open(ssh_identity_file) as f:
sshkeystr = f.read()
class WrappedHorovodTrainable(_HorovodTrainable):
_function = func
_num_hosts = num_hosts
_num_workers = num_workers
_num_cpus_per_worker = num_cpus_per_worker
_use_gpu = use_gpu
_ssh_identity_file = ssh_identity_file
_ssh_str = sshkeystr
_timeout_s = timeout_s
@classmethod
def default_resource_request(cls, config: Dict):
return PlacementGroupFactory(
[{}]
+ [{"CPU": cls._num_cpus_per_worker, "GPU": int(use_gpu)}]
* (num_workers)
)
return WrappedHorovodTrainable
|
30,650 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
verify_certificate = not params.get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
22,569 |
def _get_valid_input_index(source_geo_def,
target_geo_def,
reduce_data,
radius_of_influence,
nprocs=1):
"""Find indices of reduced inputput data."""
source_lons, source_lats = source_geo_def.get_lonlats(nprocs=nprocs)
source_lons = np.asanyarray(source_lons).ravel()
source_lats = np.asanyarray(source_lats).ravel()
if source_lons.size == 0 or source_lats.size == 0:
raise ValueError('Cannot resample empty data set')
elif source_lons.size != source_lats.size or \
source_lons.shape != source_lats.shape:
raise ValueError('Mismatch between lons and lats')
# Remove illegal values
valid_input_index = ((source_lons >= -180) & (source_lons <= 180) & (source_lats <= 90) & (source_lats >= -90))
if reduce_data:
# Reduce dataset
griddish_types = (geometry.GridDefinition, geometry.AreaDefinition)
source_is_griddish = isinstance(source_geo_def, griddish_types)
target_is_griddish = isinstance(target_geo_def, griddish_types)
source_is_coord = isinstance(source_geo_def, geometry.CoordinateDefinition)
if (source_is_coord and target_is_griddish) or (source_is_griddish and target_is_griddish):
# Resampling from swath to grid or from grid to grid
lonlat_boundary = target_geo_def.get_boundary_lonlats()
# Combine reduced and legal values
valid_input_index &= \
data_reduce.get_valid_index_from_lonlat_boundaries(
lonlat_boundary[0],
lonlat_boundary[1],
source_lons, source_lats,
radius_of_influence)
if isinstance(valid_input_index, np.ma.core.MaskedArray):
# Make sure valid_input_index is not a masked array
valid_input_index = valid_input_index.filled(False)
return valid_input_index, source_lons, source_lats
|
def _get_valid_input_index(source_geo_def,
target_geo_def,
reduce_data,
radius_of_influence,
nprocs=1):
"""Find indices of reduced inputput data."""
source_lons, source_lats = source_geo_def.get_lonlats(nprocs=nprocs)
source_lons = np.asanyarray(source_lons).ravel()
source_lats = np.asanyarray(source_lats).ravel()
if source_lons.size == 0 or source_lats.size == 0:
raise ValueError('Cannot resample empty data set')
elif source_lons.size != source_lats.size or \
source_lons.shape != source_lats.shape:
raise ValueError('Mismatch between lons and lats')
# Remove illegal values
valid_input_index = ((source_lons >= -180) & (source_lons <= 180) & (source_lats <= 90) & (source_lats >= -90))
if reduce_data:
# Reduce dataset
griddish_types = (geometry.GridDefinition, geometry.AreaDefinition)
source_is_griddish = isinstance(source_geo_def, griddish_types)
target_is_griddish = isinstance(target_geo_def, griddish_types)
source_is_coord = isinstance(source_geo_def, geometry.CoordinateDefinition)
if (source_is_coord or source_is_griddish) and target_is_griddish:
# Resampling from swath to grid or from grid to grid
lonlat_boundary = target_geo_def.get_boundary_lonlats()
# Combine reduced and legal values
valid_input_index &= \
data_reduce.get_valid_index_from_lonlat_boundaries(
lonlat_boundary[0],
lonlat_boundary[1],
source_lons, source_lats,
radius_of_influence)
if isinstance(valid_input_index, np.ma.core.MaskedArray):
# Make sure valid_input_index is not a masked array
valid_input_index = valid_input_index.filled(False)
return valid_input_index, source_lons, source_lats
|
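A tiny standalone illustration of the "remove illegal values" mask computed at
the top of ``_get_valid_input_index``; the coordinate arrays here are made up.

import numpy as np

source_lons = np.array([10.0, 200.0, -75.0, 179.9])
source_lats = np.array([45.0, 30.0, 95.0, -90.0])
valid_input_index = ((source_lons >= -180) & (source_lons <= 180)
                     & (source_lats <= 90) & (source_lats >= -90))
print(valid_input_index)   # [ True False False  True]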
36,248 |
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obsm['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obsm['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return s if inplace else s.adata
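# Hedged usage sketch for the wrapper above. It assumes scanpy, anndata and
# the external sam-algorithm package are installed; the dataset loader and
# preprocessing steps are example choices, not part of the wrapper itself.
import scanpy as sc
import scanpy.external as sce

adata = sc.datasets.pbmc3k()          # any AnnData with non-negative counts works
sc.pp.normalize_total(adata)
sc.pp.log1p(adata)
result = sce.tl.sam(adata, inplace=True, projection='umap')
# Per the docstring above, gene weights and the UMAP embedding are written
# into the AnnData object itself.
print(adata.var['weights'].head())
print(adata.obsm['X_umap'].shape)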
|
34,564 |
def migrate_mapping_policy_to_rules(
config: Dict[Text, Any], domain: "Domain"
) -> Tuple[Dict[Text, Any], "Domain", List["StoryStep"]]:
"""
Migrate MappingPolicy to the new RulePolicy,
by updating the config, domain and generating rules.
This function modifies the config, the domain and the rules in place.
"""
policies = config.get("policies", [])
has_mapping_policy = False
has_rule_policy = False
for policy in policies:
if policy.get("name") == MappingPolicy.__name__:
has_mapping_policy = True
if policy.get("name") == RulePolicy.__name__:
has_rule_policy = True
if not has_mapping_policy:
return config, domain, []
new_config = copy.deepcopy(config)
new_domain = copy.deepcopy(domain)
new_rules = []
for intent, properties in new_domain.intent_properties.items():
# remove triggers from intents, if any
triggered_action = properties.pop("triggers", None)
if triggered_action:
trigger_rule = _get_faq_rule(
f"Rule to map `{intent}` intent to "
f"`{triggered_action}` (automatic conversion)",
intent,
triggered_action,
)
new_rules.append(*trigger_rule)
# finally update the policies
policies = _drop_policy(MappingPolicy.__name__, policies)
if new_rules and not has_rule_policy:
policies.append({"name": RulePolicy.__name__})
new_config["policies"] = policies
return new_config, new_domain, new_rules
|
def migrate_mapping_policy_to_rules(
config: Dict[Text, Any], domain: "Domain"
) -> Tuple[Dict[Text, Any], "Domain", List["StoryStep"]]:
"""
Migrate MappingPolicy to the new RulePolicy,
by updating the config, domain and generating rules.
This function modifies the config, the domain and the rules in place.
"""
policies = config.get("policies", [])
has_mapping_policy = False
has_rule_policy = False
for policy in policies:
if policy.get("name") == MappingPolicy.__name__:
has_mapping_policy = True
if policy.get("name") == RulePolicy.__name__:
has_rule_policy = True
if not has_mapping_policy:
return config, domain, []
new_config = copy.deepcopy(config)
new_domain = copy.deepcopy(domain)
new_rules = []
for intent, properties in new_domain.intent_properties.items():
# remove triggers from intents, if any
triggered_action = properties.pop("triggers", None)
if triggered_action:
trigger_rules = _get_faq_rule(
f"Rule to map `{intent}` intent to "
f"`{triggered_action}` (automatic conversion)",
intent,
triggered_action,
)
new_rules.extend(trigger_rules)
# finally update the policies
policies = _drop_policy(MappingPolicy.__name__, policies)
if new_rules and not has_rule_policy:
policies.append({"name": RulePolicy.__name__})
new_config["policies"] = policies
return new_config, new_domain, new_rules
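# Toy illustration of the policy-list bookkeeping above, using plain dicts.
# The helper below is a hypothetical stand-in for `_drop_policy`, not rasa's
# implementation.
def _drop_policy_demo(policy_name, policies):
    return [p for p in policies if p.get("name") != policy_name]

demo_policies = [{"name": "MappingPolicy"}, {"name": "MemoizationPolicy"}]
demo_policies = _drop_policy_demo("MappingPolicy", demo_policies)
demo_policies.append({"name": "RulePolicy"})
print(demo_policies)  # [{'name': 'MemoizationPolicy'}, {'name': 'RulePolicy'}]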
|
20,012 |
def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False):
"""This function calculates the intensity of each pixel associated with the plant and writes the values out to
a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant.
Inputs:
gray_img = 8- or 16-bit grayscale image data
mask = Binary mask made from selected contours
bins = number of classes to divide spectrum into
histplot = if True plots histogram of intensity values
Returns:
analysis_images = NIR histogram image
:param gray_img: numpy array
:param mask: numpy array
:param bins: int
:param histplot: bool
:return analysis_images: plotnine ggplot
"""
# apply plant shaped mask to image
mask1 = binary_threshold(mask, 0, 255, 'light')
mask1 = (mask1 / 255)
# masked = np.multiply(gray_img, mask1)
# calculate histogram
if gray_img.dtype == 'uint16':
maxval = 65536
else:
maxval = 256
masked_array = gray_img[np.where(mask > 0)]
masked_nir_mean = np.average(masked_array)
masked_nir_median = np.median(masked_array)
masked_nir_std = np.std(masked_array)
# Make a pseudo-RGB image
rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)
# Calculate histogram
hist_nir = [float(l[0]) for l in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])]
# Create list of bin labels
bin_width = maxval / float(bins)
b = 0
bin_labels = [float(b)]
for i in range(bins - 1):
b += bin_width
bin_labels.append(b)
# make hist percentage for plotting
pixels = cv2.countNonZero(mask1)
hist_percent = [(p / float(pixels)) * 100 for p in hist_nir]
masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
if params.debug is not None:
params.device += 1
if params.debug == "print":
print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + "_masked_nir_plant.png"))
if params.debug == "plot":
plot_image(masked1)
analysis_image = None
if histplot is True:
hist_x = hist_percent
# bin_labels = np.arange(0, bins)
dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels,
'Proportion of pixels (%)': hist_x})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Grayscale pixel intensity',
y='Proportion of pixels (%)'))
+ geom_line(color='red')
+ scale_x_continuous(breaks=list(range(0, maxval, 25))))
analysis_image = fig_hist
if params.debug == "print":
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png'))
elif params.debug == "plot":
print(fig_hist)
outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies',
method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list,
value=hist_nir, label=bin_labels)
outputs.add_observation(variable='nir_mean', trait='near-infrared mean',
method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float,
value=masked_nir_mean, label='none')
outputs.add_observation(variable='nir_median', trait='near-infrared median',
method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float,
value=masked_nir_median, label='none')
outputs.add_observation(variable='nir_stdev', trait='near-infrared standard deviation',
method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float,
value=masked_nir_std, label='none')
# Store images
outputs.images.append(analysis_image)
return analysis_image
|
def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False):
"""This function calculates the intensity of each pixel associated with the plant and writes the values out to
a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant.
Inputs:
gray_img = 8- or 16-bit grayscale image data
mask = Binary mask made from selected contours
bins = number of classes to divide spectrum into
histplot = if True plots histogram of intensity values
Returns:
analysis_images = NIR histogram image
:param gray_img: numpy array
:param mask: numpy array
:param bins: int
:param histplot: bool
:return analysis_images: plotnine ggplot
"""
# apply plant shaped mask to image
mask1 = binary_threshold(mask, 0, 255, 'light')
mask1 = (mask1 / 255)
# masked = np.multiply(gray_img, mask1)
# calculate histogram
if gray_img.dtype == 'uint16':
maxval = 65536
else:
maxval = 256
masked_array = gray_img[np.where(mask > 0)]
masked_nir_mean = np.average(masked_array)
masked_nir_median = np.median(masked_array)
masked_nir_std = np.std(masked_array)
# Make a pseudo-RGB image
rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)
# Calculate histogram
hist_nir = [float(l[0]) for l in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])]
# Create list of bin labels
bin_width = maxval / float(bins)
b = 0
bin_labels = [float(b)]
for i in range(bins - 1):
b += bin_width
bin_labels.append(b)
# make hist percentage for plotting
pixels = cv2.countNonZero(mask1)
hist_percent = [(p / float(pixels)) * 100 for p in hist_nir]
masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
if params.debug is not None:
params.device += 1
if params.debug == "print":
print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + "_masked_nir_plant.png"))
if params.debug == "plot":
plot_image(masked1)
analysis_image = None
if histplot is True:
hist_x = hist_percent
# bin_labels = np.arange(0, bins)
dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels,
'Proportion of pixels (%)': hist_x})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Grayscale pixel intensity',
y='Proportion of pixels (%)'))
+ geom_line(color='red')
+ scale_x_continuous(breaks=list(range(0, maxval, 25))))
analysis_image = fig_hist
if params.debug == "print":
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png'))
elif params.debug == "plot":
print(fig_hist)
outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies',
method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list,
value=hist_nir, label=bin_labels)
outputs.add_observation(variable='nir_mean', trait='near-infrared mean',
method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float,
value=masked_nir_mean, label='none')
outputs.add_observation(variable='nir_median', trait='near-infrared median',
method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float,
value=masked_nir_median, label='none')
outputs.add_observation(variable='nir_stdev', trait='near-infrared standard deviation',
method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float,
value=masked_nir_std, label='none')
# Store images
outputs.images.append(analysis_image)
return analysis_image
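# Hedged usage sketch (assumes plantcv and OpenCV are installed and that the
# installed plantcv version exposes this function as pcv.analyze_nir_intensity;
# the file paths are placeholders only).
import cv2
from plantcv import plantcv as pcv

gray = cv2.imread("nir_image.png", cv2.IMREAD_GRAYSCALE)          # placeholder path
plant_mask = cv2.imread("plant_mask.png", cv2.IMREAD_GRAYSCALE)   # placeholder path
hist_fig = pcv.analyze_nir_intensity(gray_img=gray, mask=plant_mask, bins=256, histplot=True)
# Summary statistics are recorded via outputs.add_observation, as in the
# function body above.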
|
45,560 |
def _remove_breaks_for_special_dates(
midnight_utcs, break_start_or_end, special_opens_or_closes
):
"""
Overwrite breaks in break_start_or_end with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when we have no breaks
if break_start_or_end is None:
return
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(break_start_or_end)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as break_starts,\n"
"but len(midnight_utcs)=%d, len(break_start_or_end)=%d"
% len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
break_start_or_end.values[indexer] = np.int64(pd.NaT)
|
def _remove_breaks_for_special_dates(
midnight_utcs, break_start_or_end, special_opens_or_closes
):
"""
Overwrite breaks in break_start_or_end with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when we have no breaks
if break_start_or_end is None:
return
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(break_start_or_end)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as break_starts,\n"
"but len(midnight_utcs)=%d, len(break_start_or_end)=%d"
% (len_m, len_oc)
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
break_start_or_end.values[indexer] = np.int64(pd.NaT)
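# Hedged illustration of the get_indexer alignment used above, on a toy
# DatetimeIndex (the dates are made up for demonstration).
import pandas as pd

midnights = pd.DatetimeIndex(["2021-01-04", "2021-01-05", "2021-01-06"])
specials = pd.DatetimeIndex(["2021-01-05", "2021-01-09"])
print(midnights.get_indexer(specials))  # [ 1 -1] -> the second date is not a trading day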
|
27,256 |
def _array_slice_op(translator, expr):
op = expr.op()
arg, start, stop = op.args
start_ = _parenthesize(translator, start)
arg_ = translator.translate(arg)
if stop is not None:
stop_ = _parenthesize(translator, stop)
diff = f'({stop_} - {start_})'
length_ = f'''
if(
{stop_} < 0,
{stop_},
if({diff} < 0, 0, {diff})
)
'''
return f'arraySlice({arg_}, {start_} + 1, {length_})'
return f'arraySlice({arg_}, {start_} + 1)'
|
def _array_slice_op(translator, expr):
op = expr.op()
arg, start, stop = op.args
start_ = _parenthesize(translator, start)
arg_ = translator.translate(arg)
if stop is not None:
stop_ = _parenthesize(translator, stop)
diff = f'({stop_} - {start_})'
length_ = f"if({stop_} < 0, {stop_}, greatest(0, {diff}))"
return f'arraySlice({arg_}, {start_} + 1, {length_})'
return f'arraySlice({arg_}, {start_} + 1)'
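# Toy illustration of the expression emitted above for a slice arr[1:4]
# (plain string formatting only; 'arr', '1' and '4' stand in for the
# translated operands).
arg_, start_, stop_ = "arr", "1", "4"
diff = f'({stop_} - {start_})'
length_ = f"if({stop_} < 0, {stop_}, greatest(0, {diff}))"
print(f'arraySlice({arg_}, {start_} + 1, {length_})')
# arraySlice(arr, 1 + 1, if(4 < 0, 4, greatest(0, (4 - 1))))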
|
4,850 |
def test_all():
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs = axs.flat
# Bbox object around which the fancy box will be drawn.
bb = mtransforms.Bbox([[0.3, 0.4], [0.7, 0.6]])
ax = axs[0]
# a fancy box with round corners. pad=0.1
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.1")
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1"')
ax = axs[1]
# bbox=round has two optional arguments: pad and rounding_size.
# They can be set during the initialization.
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.1")
# The boxstyle and its argument can be later modified with set_boxstyle().
# Note that the old attributes are simply forgotten even if the boxstyle
# name is same.
fancy.set_boxstyle("round,pad=0.1,rounding_size=0.2")
# or: fancy.set_boxstyle("round", pad=0.1, rounding_size=0.2)
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1,rounding_size=0.2"')
ax = axs[2]
# mutation_scale determine overall scale of the mutation, i.e. both pad and
# rounding_size is scaled according to this value.
fancy = add_fancy_patch_around(
ax, bb, boxstyle="round,pad=0.1", mutation_scale=2)
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1"\n mutation_scale=2')
ax = axs[3]
# When the aspect ratio of the axes is not 1, the fancy box may not be what
# you expected (green).
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.2")
fancy.set(facecolor="none", edgecolor="green")
# You can compensate this by setting the mutation_aspect (pink).
fancy = add_fancy_patch_around(
ax, bb, boxstyle="round,pad=0.3", mutation_aspect=0.5)
ax.set(xlim=(-.5, 1.5), ylim=(0, 1), aspect=2,
title='boxstyle="round,pad=0.3"\nmutation_aspect=.5')
for ax in axs:
# Draw the control points of the fancy boxes.
for patch in ax.patches:
patch.axes.plot(*patch.get_path().vertices.T, ".",
c=patch.get_edgecolor())
# Draw the original bbox (using boxstyle=square with pad=0).
fancy = add_fancy_patch_around(ax, bb, boxstyle="square,pad=0")
fancy.set(edgecolor="black", facecolor="none", zorder=10)
fig.tight_layout()
|
def test_all():
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs = axs.flat
# Bbox object around which the fancy box will be drawn.
bb = mtransforms.Bbox([[0.3, 0.4], [0.7, 0.6]])
ax = axs[0]
# a fancy box with round corners. pad=0.1
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.1")
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1"')
ax = axs[1]
# bbox=round has two optional arguments: pad and rounding_size.
# They can be set during the initialization.
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.1")
# The boxstyle and its argument can be later modified with set_boxstyle().
# Note that the old attributes are simply forgotten even if the boxstyle
# name is same.
fancy.set_boxstyle("round,pad=0.1,rounding_size=0.2")
# or: fancy.set_boxstyle("round", pad=0.1, rounding_size=0.2)
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1,rounding_size=0.2"')
ax = axs[2]
# mutation_scale determines overall scale of the mutation, i.e. both pad and
# rounding_size is scaled according to this value.
fancy = add_fancy_patch_around(
ax, bb, boxstyle="round,pad=0.1", mutation_scale=2)
ax.set(xlim=(0, 1), ylim=(0, 1), aspect=1,
title='boxstyle="round,pad=0.1"\n mutation_scale=2')
ax = axs[3]
# When the aspect ratio of the axes is not 1, the fancy box may not be what
# you expected (green).
fancy = add_fancy_patch_around(ax, bb, boxstyle="round,pad=0.2")
fancy.set(facecolor="none", edgecolor="green")
# You can compensate this by setting the mutation_aspect (pink).
fancy = add_fancy_patch_around(
ax, bb, boxstyle="round,pad=0.3", mutation_aspect=0.5)
ax.set(xlim=(-.5, 1.5), ylim=(0, 1), aspect=2,
title='boxstyle="round,pad=0.3"\nmutation_aspect=.5')
for ax in axs:
# Draw the control points of the fancy boxes.
for patch in ax.patches:
patch.axes.plot(*patch.get_path().vertices.T, ".",
c=patch.get_edgecolor())
# Draw the original bbox (using boxstyle=square with pad=0).
fancy = add_fancy_patch_around(ax, bb, boxstyle="square,pad=0")
fancy.set(edgecolor="black", facecolor="none", zorder=10)
fig.tight_layout()
|
45,906 |
def rgb_to_raw(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a RGB image to RAW version of image with the specified color filter array.
The image data is assumed to be in the range of (0, 1).
Args:
image: RGB image to be converted to bayer raw with shape :math:`(*,3,H,W)`.
cfa: Which color filter array do we want the output to mimic. I.e. which pixels are red/green/blue
Returns:
raw version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> rgbinput = torch.rand(2, 3, 4, 6)
>>> raw = rgb_to_raw(rgbinput) # 2x1x4x6
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
# pick the tensor with green pixels
# clone to make sure grad works
output: torch.Tensor = image[..., 1:2, :, :].clone()
# overwrite the r/b positions (depending on the cfa configuration) with blue/red pixels
if cfa == CFA.BG:
output[..., :, ::2, ::2] = image[..., 0:1, ::2, ::2] # red
output[..., :, 1::2, 1::2] = image[..., 2:3, 1::2, 1::2] # blue
elif cfa == CFA.GB:
output[..., :, ::2, 1::2] = image[..., 0:1, ::2, 1::2] # red
output[..., :, 1::2, ::2] = image[..., 2:3, 1::2, ::2] # blue
elif cfa == CFA.RG:
output[..., :, 1::2, 1::2] = image[..., 0:1, 1::2, 1::2] # red
output[..., :, ::2, ::2] = image[..., 2:3, ::2, ::2] # blue
elif cfa == CFA.GR:
output[..., :, 1::2, ::2] = image[..., 0:1, 1::2, ::2] # red
output[..., :, ::2, 1::2] = image[..., 2:3, ::2, 1::2] # blue
return output
|
def rgb_to_raw(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a RGB image to RAW version of image with the specified color filter array.
The image data is assumed to be in the range of (0, 1).
Args:
image: RGB image to be converted to bayer raw with shape :math:`(*,3,H,W)`.
cfa: Which color filter array do we want the output to mimic. I.e. which pixels are red/green/blue
Returns:
raw version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> rgbinput = torch.rand(2, 3, 4, 6)
>>> raw = rgb_to_raw(rgbinput, cfa=CFA.BG) # 2x1x4x6
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
# pick the tensor with green pixels
# clone to make sure grad works
output: torch.Tensor = image[..., 1:2, :, :].clone()
# overwrite the r/b positions (depending on the cfa configuration) with blue/red pixels
if cfa == CFA.BG:
output[..., :, ::2, ::2] = image[..., 0:1, ::2, ::2] # red
output[..., :, 1::2, 1::2] = image[..., 2:3, 1::2, 1::2] # blue
elif cfa == CFA.GB:
output[..., :, ::2, 1::2] = image[..., 0:1, ::2, 1::2] # red
output[..., :, 1::2, ::2] = image[..., 2:3, 1::2, ::2] # blue
elif cfa == CFA.RG:
output[..., :, 1::2, 1::2] = image[..., 0:1, 1::2, 1::2] # red
output[..., :, ::2, ::2] = image[..., 2:3, ::2, ::2] # blue
elif cfa == CFA.GR:
output[..., :, 1::2, ::2] = image[..., 0:1, 1::2, ::2] # red
output[..., :, ::2, 1::2] = image[..., 2:3, ::2, 1::2] # blue
return output
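# Hedged usage sketch (assumes torch is installed and that rgb_to_raw and the
# CFA enum from the surrounding module are in scope; the tensor is random
# demo data).
import torch

rgb = torch.rand(1, 3, 4, 4)
raw = rgb_to_raw(rgb, cfa=CFA.BG)
print(raw.shape)  # torch.Size([1, 1, 4, 4])
# Under the BG layout the top-left pixel is taken from the red channel.
print(torch.allclose(raw[0, 0, 0, 0], rgb[0, 0, 0, 0]))  # True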
|
30,580 |
def list_collections_command(client: Client, **kwargs) -> Tuple[str, dict, list]:
raw_response = client.db.list_collection_names()
if len(raw_response):
readable_outputs = tableToMarkdown(
'MongoDB: All collections in database', raw_response, headers=['Collection']
)
outputs = {
'MongoDB.Collection(val.Name === obj.Name)': [
{'Name': collection} for collection in raw_response
]
}
return readable_outputs, outputs, raw_response
else:
return "MongoDB: No results found", {}, raw_response
|
def list_collections_command(client: Client, **kwargs) -> Tuple[str, dict, list]:
raw_response = client.db.list_collection_names()
if raw_response:
readable_outputs = tableToMarkdown(
'MongoDB: All collections in database', raw_response, headers=['Collection']
)
outputs = {
'MongoDB.Collection(val.Name === obj.Name)': [
{'Name': collection} for collection in raw_response
]
}
return readable_outputs, outputs, raw_response
else:
return "MongoDB: No results found", {}, raw_response
|
5,625 |
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature='gk21', full_output=False):
"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using `multiprocessing.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'trapz'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'trapz' (composite trapezoid rule).
full_output : bool, optional
If true, populate ``info`` return value with "alist", "blist",
"rlist", "elist", "iord" keys.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Info dictionary. Has always the keys "neval" and "last"
ier : int
Result code
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in a user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
args = (epsabs, epsrel, norm, cache_size, limit, workers, points, quadrature, full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, *args)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, *args)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. first interval split occurs at t=0, which separates
# positive and negative sides of the integral
f2 = DoubleInfiniteFunc(f)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
if a != b:
res = quad_vec(f2, -1, 1, *args)
else:
res = quad_vec(f2, 1, 1, *args)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 8
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'trapz': _quadrature_trapz}[quadrature]
except KeyError:
raise ValueError("unknown quadrature {!r}".format(quadrature))
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if p <= a or p >= b or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
# Process intervals
with mapwrapper:
ier = 1
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
for j in range(parallel_count):
if not intervals:
break
if j > 0 and abs(intervals[0][0]) * len(intervals) < 0.5*tol:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = 0
break
if global_error < rounding_error:
ier = 2
break
res = global_integral
err = global_error + rounding_error
info = dict(neval=neval,
last=len(intervals))
if full_output:
info['alist'] = np.array([z[1] for z in intervals])
info['blist'] = np.array([z[2] for z in intervals])
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
info['rlist'] = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
info['elist'] = np.array([-z[0] for z in intervals])
info['iord'] = np.argsort(-info['elist'])
return (res, err, info, ier)
|
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature='gk21', full_output=False):
"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number of bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using `multiprocessing.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'trapz'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'trapz' (composite trapezoid rule).
full_output : bool, optional
If true, populate ``info`` return value with "alist", "blist",
"rlist", "elist", "iord" keys.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Info dictionary. Has always the keys "neval" and "last"
ier : int
Result code
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in a user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
args = (epsabs, epsrel, norm, cache_size, limit, workers, points, quadrature, full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, *args)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, *args)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. first interval split occurs at t=0, which separates
# positive and negative sides of the integral
f2 = DoubleInfiniteFunc(f)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
if a != b:
res = quad_vec(f2, -1, 1, *args)
else:
res = quad_vec(f2, 1, 1, *args)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 8
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'trapz': _quadrature_trapz}[quadrature]
except KeyError:
raise ValueError("unknown quadrature {!r}".format(quadrature))
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if p <= a or p >= b or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
# Process intervals
with mapwrapper:
ier = 1
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
for j in range(parallel_count):
if not intervals:
break
if j > 0 and abs(intervals[0][0]) * len(intervals) < 0.5*tol:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = 0
break
if global_error < rounding_error:
ier = 2
break
res = global_integral
err = global_error + rounding_error
info = dict(neval=neval,
last=len(intervals))
if full_output:
info['alist'] = np.array([z[1] for z in intervals])
info['blist'] = np.array([z[2] for z in intervals])
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
info['rlist'] = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
info['elist'] = np.array([-z[0] for z in intervals])
info['iord'] = np.argsort(-info['elist'])
return (res, err, info, ier)
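# Hedged usage sketch: integrating a vector-valued integrand on [0, 1]. The
# function above appears to mirror scipy.integrate.quad_vec, which is what
# the import below relies on; the integrand is demo-only.
import numpy as np
from scipy.integrate import quad_vec

res, err = quad_vec(lambda x: np.array([x, x ** 2, np.cos(x)]), 0.0, 1.0)
print(res)  # approximately [0.5, 0.33333, 0.84147]
print(err)  # error estimate in the default 2-norm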
|
3,736 |
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError(f'illegal input names {repr(names)}')
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
|
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError(f'illegal input names {names!r}')
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
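# Toy illustration of the defaulting behaviour above: when fewer names than
# fields are supplied, the remainder fall back to the 'f%i' defaults
# (stand-alone demo, no reserved names involved).
ndescr_demo = 3
names_demo = ['time']
defaults_demo = ['f%i' % i for i in range(ndescr_demo)]
print(names_demo + defaults_demo[len(names_demo):])  # ['time', 'f1', 'f2']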
|
31,858 |
def handle_incoming_closing_incident(incident_data):
closing_entry = {} # type: Dict
incident_status = incident_data.get('status')
if incident_status in XDR_RESOLVED_STATUS_TO_XSOAR:
demisto.debug(f"Closing XDR issue {incident_data.get('incident_id')}")
closing_entry = {
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': XDR_RESOLVED_STATUS_TO_XSOAR.get(incident_data.get("status")),
'closeNotes': incident_data.get('resolve_comment')
},
'ContentsFormat': EntryFormat.JSON
}
incident_data['closeReason'] = XDR_RESOLVED_STATUS_TO_XSOAR.get(incident_data.get("status"))
incident_data['closeNotes'] = incident_data.get('resolve_comment')
if incident_data.get('status') == 'resolved_known_issue':
closing_entry['Contents']['closeNotes'] = 'Known Issue.\n' + incident_data['closeNotes']
incident_data['closeNotes'] = 'Known Issue.\n' + incident_data['closeNotes']
else:
demisto.debug(f'Not closing the mirrored XSOAR incident. Mirrored XDR incident status is {incident_status}.')
return closing_entry
|
def handle_incoming_closing_incident(incident_data):
closing_entry = {} # type: Dict
if (incident_status := incident_data.get('status')) in XDR_RESOLVED_STATUS_TO_XSOAR:
demisto.debug(f"Closing XDR issue {incident_data.get('incident_id')}")
closing_entry = {
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': XDR_RESOLVED_STATUS_TO_XSOAR.get(incident_data.get("status")),
'closeNotes': incident_data.get('resolve_comment')
},
'ContentsFormat': EntryFormat.JSON
}
incident_data['closeReason'] = XDR_RESOLVED_STATUS_TO_XSOAR.get(incident_data.get("status"))
incident_data['closeNotes'] = incident_data.get('resolve_comment')
if incident_data.get('status') == 'resolved_known_issue':
closing_entry['Contents']['closeNotes'] = 'Known Issue.\n' + incident_data['closeNotes']
incident_data['closeNotes'] = 'Known Issue.\n' + incident_data['closeNotes']
else:
demisto.debug(f'Not closing the mirrored XSOAR incident. Mirrored XDR incident status is {incident_status}.')
return closing_entry
|
59,614 |
def clib_full_names(env=None):
"""
Return the full path of GMT's shared library for the current OS.
Parameters
----------
env : dict or None
A dictionary containing the environment variables. If ``None``, will
default to ``os.environ``.
Returns
-------
lib_fullnames: list of str
List of possible full names of GMT's shared library.
"""
if env is None:
env = os.environ
libnames = clib_name(os_name=sys.platform)
libpath = env.get("GMT_LIBRARY_PATH", "")
lib_fullnames = [os.path.join(libpath, libname) for libname in libnames]
# Search for DLLs in PATH if GMT_LIBRARY_PATH is not defined [Windows only]
if not libpath and sys.platform == "win32":
for libname in libnames:
libfullpath = find_library(libname)
if libfullpath:
lib_fullnames.append(libfullpath)
return lib_fullnames
|
def clib_full_names(env=None):
"""
Return the full path of GMT's shared library for the current OS.
Parameters
----------
env : dict or None
A dictionary containing the environment variables. If ``None``, will
default to ``os.environ``.
Returns
-------
lib_fullnames: list of str
List of possible full names of GMT's shared library.
"""
if env is None:
env = os.environ
libnames = clib_name(os_name=sys.platform) # e.g. libgmt.so, libgmt.dylib, gmt.dll
libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib
lib_fullnames = [os.path.join(libpath, libname) for libname in libnames]
# Search for DLLs in PATH if GMT_LIBRARY_PATH is not defined [Windows only]
if not libpath and sys.platform == "win32":
for libname in libnames:
libfullpath = find_library(libname)
if libfullpath:
lib_fullnames.append(libfullpath)
return lib_fullnames
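# Hedged usage sketch: resolving candidate GMT library names with an explicit
# GMT_LIBRARY_PATH (the path is a placeholder; the actual names depend on the OS).
candidates = clib_full_names(env={"GMT_LIBRARY_PATH": "/opt/gmt/lib"})
print(candidates)  # e.g. ['/opt/gmt/lib/libgmt.so'] on Linux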
|
50,321 |
def test_transform_types_copies_data():
data = {"attr": "spam"}
new_data, _ = utils._transform_types(data, {})
assert new_data is not data
|
def test_transform_types_copies_data():
data = {"attr": "spam"}
new_data, files = utils._transform_types(data, {})
assert new_data is not data
assert new_data == data
assert files == {}
|
2,451 |
def as_float_array(X, *, copy=True, force_all_finite=True):
"""Convert an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
The input array.
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
"""
if isinstance(X, np.matrix) or (
not isinstance(X, np.ndarray) and not sp.issparse(X)
):
return check_array(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=False,
)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C") if copy else X
else:
if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
|
def as_float_array(X, *, copy=True, force_all_finite=True):
"""Convert an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
The input data.
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
"""
if isinstance(X, np.matrix) or (
not isinstance(X, np.ndarray) and not sp.issparse(X)
):
return check_array(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=False,
)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C") if copy else X
else:
if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
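# Hedged usage sketch (assumes scikit-learn is installed; the arrays are demo
# data only).
import numpy as np
from sklearn.utils import as_float_array

print(as_float_array(np.array([[1, 2], [3, 4]], dtype=np.int32)).dtype)  # float32
print(as_float_array(np.array([1, 2], dtype=np.int64)).dtype)            # float64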
|
25,352 |
def underline(text: str, escape_formatting: bool = True) -> str:
"""Get the given text with an underline.
Note: This escapes text prior to underlining
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "__{}__".format(text)
|
def underline(text: str, escape_formatting: bool = True) -> str:
"""Get the given text with an underline.
Note: By default, this function will escape ``text`` prior to underlining.
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "__{}__".format(text)
|
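A short usage sketch, assuming the escape helper from the same chat-formatting module is in scope; the exact escaped output depends on that helper, so the comments below are indicative only.
print(underline("hello"))                          # __hello__
print(underline("*hi*", escape_formatting=False))  # __*hi*__ (markdown passed through untouched)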
5,259 |
def _load_vocab(fin, new_format, encoding='utf-8'):
"""Load a vocabulary from a FB binary.
Before the vocab is ready for use, call the prepare_vocab function and pass
in the relevant parameters from the model.
Parameters
----------
fin : file
An open file pointer to the binary.
new_format: boolean
True if the binary is of the newer format.
encoding : str
The encoding to use when decoding binary data into words.
Returns
-------
tuple
The loaded vocabulary. Keys are words, values are counts.
The vocabulary size.
The number of words.
The numnber of tokens.
"""
vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')
# Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
if nlabels > 0:
raise NotImplementedError("Supervised fastText models are not supported")
logger.info("loading %s words for fastText model from %s", vocab_size, fin.name)
ntokens, = _struct_unpack(fin, '@q') # number of tokens
if new_format:
pruneidx_size, = _struct_unpack(fin, '@q')
raw_vocab = collections.OrderedDict()
for i in range(vocab_size):
word_bytes = io.BytesIO()
char_byte = fin.read(1)
while char_byte != _END_OF_WORD_MARKER:
word_bytes.write(char_byte)
char_byte = fin.read(1)
word_bytes = word_bytes.getvalue()
try:
word = word_bytes.decode(encoding)
except UnicodeDecodeError:
word = word_bytes.decode(encoding, errors='backslashreplace')
logger.error(
'failed to decode invalid unicode bytes %r; replacing invalid characters, using %r',
word_bytes, word
)
count, _ = _struct_unpack(fin, '@qb')
raw_vocab[word] = count
if new_format:
for j in range(pruneidx_size):
_struct_unpack(fin, '@2i')
return raw_vocab, vocab_size, nwords, ntokens
|
def _load_vocab(fin, new_format, encoding='utf-8'):
"""Load a vocabulary from a FB binary.
Before the vocab is ready for use, call the prepare_vocab function and pass
in the relevant parameters from the model.
Parameters
----------
fin : file
An open file pointer to the binary.
new_format: boolean
True if the binary is of the newer format.
encoding : str
The encoding to use when decoding binary data into words.
Returns
-------
tuple
The loaded vocabulary. Keys are words, values are counts.
The vocabulary size.
The number of words.
The number of tokens.
"""
vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')
# Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
if nlabels > 0:
raise NotImplementedError("Supervised fastText models are not supported")
logger.info("loading %s words for fastText model from %s", vocab_size, fin.name)
ntokens, = _struct_unpack(fin, '@q') # number of tokens
if new_format:
pruneidx_size, = _struct_unpack(fin, '@q')
raw_vocab = collections.OrderedDict()
for i in range(vocab_size):
word_bytes = io.BytesIO()
char_byte = fin.read(1)
while char_byte != _END_OF_WORD_MARKER:
word_bytes.write(char_byte)
char_byte = fin.read(1)
word_bytes = word_bytes.getvalue()
try:
word = word_bytes.decode(encoding)
except UnicodeDecodeError:
word = word_bytes.decode(encoding, errors='backslashreplace')
logger.error(
'failed to decode invalid unicode bytes %r; replacing invalid characters, using %r',
word_bytes, word
)
count, _ = _struct_unpack(fin, '@qb')
raw_vocab[word] = count
if new_format:
for j in range(pruneidx_size):
_struct_unpack(fin, '@2i')
return raw_vocab, vocab_size, nwords, ntokens
|
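The header parsing above hinges on fixed-width struct reads. The standalone sketch below uses only the standard library (it is not gensim's _struct_unpack, which is assumed to be a thin wrapper around struct.unpack) and shows the byte layout the first few reads expect.
import io
import struct

buf = io.BytesIO()
buf.write(struct.pack('@3i', 5, 5, 0))   # vocab_size, nwords, nlabels
buf.write(struct.pack('@q', 1234))       # ntokens
buf.seek(0)

vocab_size, nwords, nlabels = struct.unpack('@3i', buf.read(struct.calcsize('@3i')))
ntokens, = struct.unpack('@q', buf.read(struct.calcsize('@q')))
print(vocab_size, nwords, nlabels, ntokens)  # 5 5 0 1234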
4,430 |
def _check_tags(tags) -> Tuple[str]:
# Must be iterable, not not a string
if (isinstance(tags, str) or not isinstance(tags, (Sequence, np.ndarray))):
raise TypeError(
f'tags must be a collection of str, but got {type(tags)} '
f'instead: {tags}'
)
tags = tuple(tags)
# Check for invalid dtypes
bad_tags = [tag for tag in tags
if not isinstance(tag, str)]
if bad_tags:
raise TypeError(
f'tags must be strings, but got the following instead: '
f'{", ".join([str(tag) for tag in bad_tags])}'
)
# Check for invalid characters
invalid_chars = (' ', '"', '\n') # we'll probably find more :-)
bad_tags = []
for tag in tags:
for invalid_char in invalid_chars:
if invalid_char in tag:
bad_tags.append(tag)
break
if bad_tags:
raise ValueError(
f'The following tags contained invalid characters: '
f'{", ".join(bad_tags)}'
)
return tags
|
def _check_tags(tags) -> Tuple[str]:
# Must be iterable, but not a string
if (isinstance(tags, str) or not isinstance(tags, (Sequence, np.ndarray))):
raise TypeError(
f'tags must be a collection of str, but got {type(tags)} '
f'instead: {tags}'
)
tags = tuple(tags)
# Check for invalid dtypes
bad_tags = [tag for tag in tags
if not isinstance(tag, str)]
if bad_tags:
raise TypeError(
f'tags must be strings, but got the following instead: '
f'{", ".join([str(tag) for tag in bad_tags])}'
)
# Check for invalid characters
invalid_chars = (' ', '"', '\n') # we'll probably find more :-)
bad_tags = []
for tag in tags:
for invalid_char in invalid_chars:
if invalid_char in tag:
bad_tags.append(tag)
break
if bad_tags:
raise ValueError(
f'The following tags contained invalid characters: '
f'{", ".join(bad_tags)}'
)
return tags
|
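A quick illustration of the three failure modes guarded above (a bare string, a non-string element, and a forbidden character), assuming _check_tags and its imports (Sequence, numpy, Tuple) are in scope as in the surrounding module.
for bad in ('cpu', ['ok', 3], ['bad tag']):
    try:
        _check_tags(bad)
    except (TypeError, ValueError) as err:
        print(type(err).__name__, '->', str(err)[:60])
print(_check_tags(['cpu', 'nightly']))  # ('cpu', 'nightly')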
36,272 |
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Program = None, active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input expt_results and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accomodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar via tqdm if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
observables = [copy(res.setting.out_operator) for res in expt_results]
observables = list(set(observables)) # get unique observables that will need to be calibrated
programs = [get_calibration_program(obs, noisy_program, active_reset) for obs in
observables]
meas_qubits = [obs.get_qubits() for obs in observables]
calibrations = {}
for prog, meas_qs, obs in zip(tqdm(programs, disable=not show_progress_bar), meas_qubits,
observables):
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting)
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Program = None, active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input expt_results and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accommodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar via tqdm if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
observables = {copy(res.setting.out_operator) for res in expt_results}
observables = list(set(observables)) # get unique observables that will need to be calibrated
programs = [get_calibration_program(obs, noisy_program, active_reset) for obs in
observables]
meas_qubits = [obs.get_qubits() for obs in observables]
calibrations = {}
for prog, meas_qs, obs in zip(tqdm(programs, disable=not show_progress_bar), meas_qubits,
observables):
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting)
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
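Not library code: a toy numeric sketch of the correction step at the end of the function above, using the standard first-order error propagation for a ratio as a stand-in for ratio_variance (the real helper may differ in detail).
import numpy as np

def ratio_var(a, var_a, b, var_b):
    # var(a/b) ~ var_a/b**2 + a**2*var_b/b**4, to first order
    return var_a / b**2 + (a**2 * var_b) / b**4

raw_mean, raw_var = 0.72, 0.0004   # symmetrized estimate of <O>
cal_mean, cal_var = 0.90, 0.0001   # eigenstate calibration of the same observable
corrected_mean = raw_mean / cal_mean
corrected_err = np.sqrt(ratio_var(raw_mean, raw_var, cal_mean, cal_var))
print(round(corrected_mean, 3), round(corrected_err, 4))  # 0.8 0.0239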
44,580 |
def main():
args = argument_parser()
ecs_version = read_version(args.ref)
print('Running generator. ECS version ' + ecs_version)
# default location to save files
out_dir = 'generated'
docs_dir = 'docs'
if args.out:
default_dirs = False
out_dir = os.path.join(args.out, out_dir)
docs_dir = os.path.join(args.out, docs_dir)
else:
default_dirs = True
ecs_helpers.make_dirs(out_dir)
# To debug issues in the gradual building up of the nested structure, insert
# statements like this after any step of interest.
# ecs_helpers.yaml_dump('ecs.yml', fields)
# Detect usage of experimental changes to tweak artifact headers
if loader.EXPERIMENTAL_SCHEMA_DIR in args.include:
ecs_version += "+exp"
fields = loader.load_schemas(ref=args.ref, included_files=args.include)
if args.oss:
oss.fallback(fields)
cleaner.clean(fields, strict=args.strict)
finalizer.finalize(fields)
fields = subset_filter.filter(fields, args.subset, out_dir)
nested, flat = intermediate_files.generate(fields, os.path.join(out_dir, 'ecs'), default_dirs)
if args.intermediate_only:
exit()
csv_generator.generate(flat, ecs_version, out_dir)
es_template.generate(flat, ecs_version, out_dir, args.template_settings, args.mapping_settings)
beats.generate(nested, ecs_version, out_dir)
if args.include or args.subset:
exit()
ecs_helpers.make_dirs(docs_dir)
asciidoc_fields.generate(nested, ecs_version, docs_dir)
|
def main():
args = argument_parser()
ecs_version = read_version(args.ref)
print('Running generator. ECS version ' + ecs_version)
# default location to save files
out_dir = 'generated'
docs_dir = 'docs'
if args.out:
default_dirs = False
out_dir = os.path.join(args.out, out_dir)
docs_dir = os.path.join(args.out, docs_dir)
else:
default_dirs = True
ecs_helpers.make_dirs(out_dir)
# To debug issues in the gradual building up of the nested structure, insert
# statements like this after any step of interest.
# ecs_helpers.yaml_dump('ecs.yml', fields)
# Detect usage of experimental changes to tweak artifact version label
if loader.EXPERIMENTAL_SCHEMA_DIR in args.include:
ecs_version += "+exp"
fields = loader.load_schemas(ref=args.ref, included_files=args.include)
if args.oss:
oss.fallback(fields)
cleaner.clean(fields, strict=args.strict)
finalizer.finalize(fields)
fields = subset_filter.filter(fields, args.subset, out_dir)
nested, flat = intermediate_files.generate(fields, os.path.join(out_dir, 'ecs'), default_dirs)
if args.intermediate_only:
exit()
csv_generator.generate(flat, ecs_version, out_dir)
es_template.generate(flat, ecs_version, out_dir, args.template_settings, args.mapping_settings)
beats.generate(nested, ecs_version, out_dir)
if args.include or args.subset:
exit()
ecs_helpers.make_dirs(docs_dir)
asciidoc_fields.generate(nested, ecs_version, docs_dir)
|
53,163 |
def get_version(db):
with closing(db.cursor()) as cursor:
cursor.execute('SELECT VERSION()')
result = cursor.fetchone()
# Version might include a build, a flavor, or both
# e.g. 4.1.26-log, 4.1.26-MariaDB, 10.0.1-MariaDB-mariadb1precise-log
# See http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
# https://mariadb.com/kb/en/library/version/
# and https://mariadb.com/kb/en/library/server-system-variables/#version
raw_version = result[0]
if isinstance(raw_version, bytes):
raw_version = raw_version.decode()
parts = raw_version.split('-')
version, flavor, build = [parts[0], '', '']
for data in parts:
if data == "MariaDB":
flavor = "MariaDB"
if data != "MariaDB" and flavor == '':
flavor = "MySQL"
if data in BUILDS:
build = data
if build == '':
build = 'unspecified'
return MySQLVersion(version, flavor, build)
|
def get_version(db):
with closing(db.cursor()) as cursor:
cursor.execute('SELECT VERSION()')
result = cursor.fetchone()
# Version might include a build, a flavor, or both
# e.g. 4.1.26-log, 4.1.26-MariaDB, 10.0.1-MariaDB-mariadb1precise-log
# See http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
# https://mariadb.com/kb/en/library/version/
# and https://mariadb.com/kb/en/library/server-system-variables/#version
raw_version = to_native_string(result[0])
parts = raw_version.split('-')
version, flavor, build = [parts[0], '', '']
for data in parts:
if data == "MariaDB":
flavor = "MariaDB"
if data != "MariaDB" and flavor == '':
flavor = "MySQL"
if data in BUILDS:
build = data
if build == '':
build = 'unspecified'
return MySQLVersion(version, flavor, build)
|
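A standalone sketch of the version-string parsing above; BUILDS is assumed here to be a set of known build suffixes such as 'log', 'standard' and 'debug' (the real constant lives elsewhere in the check), and to_native_string is assumed to simply decode bytes when needed.
BUILDS = {'log', 'standard', 'debug'}  # assumption: the real list lives in the check's constants

def parse_version(raw_version):
    parts = raw_version.split('-')
    version, flavor, build = parts[0], '', ''
    for data in parts:
        if data == "MariaDB":
            flavor = "MariaDB"
        if data != "MariaDB" and flavor == '':
            flavor = "MySQL"
        if data in BUILDS:
            build = data
    return version, flavor, build or 'unspecified'

print(parse_version('4.1.26-log'))                          # ('4.1.26', 'MySQL', 'log')
print(parse_version('10.0.1-MariaDB-mariadb1precise-log'))  # ('10.0.1', 'MariaDB', 'log')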
56,683 |
def test_author_dates_match():
_atype = {'key': '/type/author'}
basic = {
'name': 'John Smith',
'death_date': '1688',
'key': '/a/OL6398451A',
'birth_date': '1650',
'type': _atype,
}
full_dates = {
'name': 'John Smith',
'death_date': '23 June 1688',
'key': '/a/OL6398452A',
'birth_date': '01 January 1650',
'type': _atype,
}
full_different = {
'name': 'John Smith',
'death_date': '12 June 1688',
'key': '/a/OL6398453A',
'birth_date': '01 December 1650',
'type': _atype,
}
no_death = {
'name': 'John Smith',
'key': '/a/OL6398454A',
'birth_date': '1650',
'type': _atype,
}
no_dates = {'name': 'John Smith', 'key': '/a/OL6398455A', 'type': _atype}
non_match = {
'name': 'John Smith',
'death_date': '1999',
'key': '/a/OL6398456A',
'birth_date': '1950',
'type': _atype,
}
different_name = {'name': 'Jane Farrier', 'key': '/a/OL6398457A', 'type': _atype}
assert author_dates_match(basic, basic)
assert author_dates_match(basic, full_dates)
assert author_dates_match(basic, no_death)
assert author_dates_match(basic, no_dates)
assert author_dates_match(no_dates, no_dates)
assert author_dates_match(
no_dates, non_match
) # Without dates, the match returns True
assert author_dates_match(
no_dates, different_name
) # This method only compares dates and ignores names
assert author_dates_match(basic, non_match) is False
# FIXME: the following should properly be False:
assert author_dates_match(
full_different, full_dates
) # this shows matches are only occurring on year, full dates are ignored!
|
def test_author_dates_match():
_atype = {'key': '/type/author'}
basic = {
'name': 'John Smith',
'death_date': '1688',
'key': '/a/OL6398451A',
'birth_date': '1650',
'type': _atype,
}
full_dates = {
'name': 'John Smith',
'death_date': '23 June 1688',
'key': '/a/OL6398452A',
'birth_date': '01 January 1650',
'type': _atype,
}
full_different = {
'name': 'John Smith',
'death_date': '12 June 1688',
'key': '/a/OL6398453A',
'birth_date': '01 December 1650',
'type': _atype,
}
no_death = {
'name': 'John Smith',
'key': '/a/OL6398454A',
'birth_date': '1650',
'type': _atype,
}
no_dates = {'name': 'John Smith', 'key': '/a/OL6398455A', 'type': _atype}
non_match = {
'name': 'John Smith',
'death_date': '1999',
'key': '/a/OL6398456A',
'birth_date': '1950',
'type': _atype,
}
different_name = {'name': 'Jane Farrier', 'key': '/a/OL6398457A', 'type': _atype}
assert author_dates_match(basic, basic)
assert author_dates_match(basic, full_dates)
assert author_dates_match(basic, no_death)
assert author_dates_match(basic, no_dates)
assert author_dates_match(no_dates, no_dates)
assert author_dates_match(
no_dates, non_match
) # Without dates, the match returns True
assert author_dates_match(
no_dates, different_name
) # This method only compares dates and ignores names
assert author_dates_match(basic, non_match) is False
# FIXME: the following should properly be False:
# Matches are only occurring on the year so full dates are ignored!
assert author_dates_match(full_different, full_dates)
|
32,006 |
def get_hosts_scan_list(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the host scan list.
Implement the command 'prisma-cloud-compute-hosts-scan-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-hosts-scan-list command arguments
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.pop("limit_record", "10"), offset=args.get("offset", "0")
)
stats_limit, _ = parse_limit_and_offset_values(limit=args.pop("limit_stats", "10"))
args["compact"] = argToBoolean(value=args.get("compact", "true"))
if hosts_scans := client.get_hosts_scan_info(params=assign_params(**args)):
for scan in hosts_scans:
if "vulnerabilities" in scan:
scan["vulnerabilities"] = filter_api_response(
api_response=scan.get("vulnerabilities"), limit=stats_limit
)
if vulnerabilities := scan.get("vulnerabilities"):
for vuln in vulnerabilities:
if "fixDate" in vuln:
vuln["fixDate"] = epochs_to_timestamp(epochs=vuln.get("fixDate", 0))
if "complianceIssues" in scan:
scan["complianceIssues"] = filter_api_response(
api_response=scan.get("complianceIssues"), limit=stats_limit
)
if compliances := scan.get("complianceIssues"):
for compliance in compliances:
if "fixDate" in compliance:
compliance["fixDate"] = epochs_to_timestamp(epochs=compliance.get("fixDate", 0))
host_description_table = tableToMarkdown(
name="Host description",
t=get_hosts_descriptions(hosts_scans=hosts_scans),
headers=[
"Hostname", "Docker Version", "OS Distribution", "Vulnerabilities Count", "Compliance Issues Count"
],
removeNull=True
)
if len(hosts_scans) == 1: # then there is only one host scan report
if args.get("compact", True):
# if the compact is True, the api will filter
# the response and send back only vulnerability/compliance statistics
vuln_statistics_table = tableToMarkdown(
name="Vulnerability Statistics",
t=hosts_scans[0].get("vulnerabilityDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
compliance_statistics_table = tableToMarkdown(
name="Compliance Statistics",
t=hosts_scans[0].get("complianceDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
table = host_description_table + vuln_statistics_table + compliance_statistics_table
else:
# handle the case where there is an host scan without vulnerabilities
vulnerabilities = hosts_scans[0].get("vulnerabilities")
if vulnerabilities is None:
vulnerabilities = []
vulnerabilities_table = tableToMarkdown(
name="Vulnerabilities",
t=vulnerabilities,
headers=["cve", "description", "severity", "packageName", "status", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace,
)
# handle the case where there is an host scan without compliances
compliances = hosts_scans[0].get("complianceIssues")
if compliances is None:
compliances = []
compliances_table = tableToMarkdown(
name="Compliances",
t=compliances,
headers=["id", "severity", "status", "description", "packageName", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace
)
table = host_description_table + vulnerabilities_table + compliances_table
else:
table = host_description_table
else:
table = "No results found"
return CommandResults(
outputs_prefix="PrismaCloudCompute.ReportHostScan",
outputs_key_field="_id",
outputs=hosts_scans,
readable_output=table,
)
|
def get_hosts_scan_list(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the host scan list.
Implement the command 'prisma-cloud-compute-hosts-scan-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-hosts-scan-list command arguments
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.pop("limit_record", "10"), offset=args.get("offset", "0")
)
stats_limit, _ = parse_limit_and_offset_values(limit=args.pop("limit_stats", "10"))
args["compact"] = argToBoolean(value=args.get("compact", "true"))
if hosts_scans := client.get_hosts_scan_info(params=assign_params(**args)):
for scan in hosts_scans:
if "vulnerabilities" in scan:
scan["vulnerabilities"] = filter_api_response(
api_response=scan.get("vulnerabilities"), limit=stats_limit
)
if vulnerabilities := scan.get("vulnerabilities"):
for vuln in vulnerabilities:
if "fixDate" in vuln:
vuln["fixDate"] = epochs_to_timestamp(epochs=vuln.get("fixDate", 0))
if "complianceIssues" in scan:
scan["complianceIssues"] = filter_api_response(
api_response=scan.get("complianceIssues"), limit=stats_limit
)
if compliances := scan.get("complianceIssues"):
for compliance in compliances:
if "fixDate" in compliance:
compliance["fixDate"] = epochs_to_timestamp(epochs=compliance.get("fixDate", 0))
host_description_table = tableToMarkdown(
name="Host description",
t=get_hosts_descriptions(hosts_scans=hosts_scans),
headers=[
"Hostname", "Docker Version", "OS Distribution", "Vulnerabilities Count", "Compliance Issues Count"
],
removeNull=True
)
if len(hosts_scans) == 1: # then there is only one host scan report
if args.get("compact", True):
# if the compact is True, the api will filter
# the response and send back only vulnerability/compliance statistics
vuln_statistics_table = tableToMarkdown(
name="Vulnerability Statistics",
t=hosts_scans[0].get("vulnerabilityDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
compliance_statistics_table = tableToMarkdown(
name="Compliance Statistics",
t=hosts_scans[0].get("complianceDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
table = host_description_table + vuln_statistics_table + compliance_statistics_table
else:
# handle the case where there is a host scan without vulnerabilities
vulnerabilities = hosts_scans[0].get("vulnerabilities")
if vulnerabilities is None:
vulnerabilities = []
vulnerabilities_table = tableToMarkdown(
name="Vulnerabilities",
t=vulnerabilities,
headers=["cve", "description", "severity", "packageName", "status", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace,
)
# handle the case where there is a host scan without compliances
compliances = hosts_scans[0].get("complianceIssues")
if compliances is None:
compliances = []
compliances_table = tableToMarkdown(
name="Compliances",
t=compliances,
headers=["id", "severity", "status", "description", "packageName", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace
)
table = host_description_table + vulnerabilities_table + compliances_table
else:
table = host_description_table
else:
table = "No results found."
return CommandResults(
outputs_prefix="PrismaCloudCompute.ReportHostScan",
outputs_key_field="_id",
outputs=hosts_scans,
readable_output=table,
)
|
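epochs_to_timestamp is only called, not defined, in the row above; the snippet below is a plausible stand-in (epoch seconds to an ISO-8601 UTC string) for illustration only, not the integration's actual helper.
from datetime import datetime, timezone

def epochs_to_timestamp(epochs: int) -> str:
    # assumption: the real helper formats epoch seconds as a human-readable UTC string
    return datetime.fromtimestamp(epochs, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

print(epochs_to_timestamp(1609459200))  # 2021-01-01T00:00:00Z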
6,641 |
def get_columns():
columns = [
{
"label": _("Sales Order"),
"fieldname": "name",
"fieldtype": "Link",
"options": "Sales Order",
"read_only": 1,
},
{
"label": _("Submitted"),
"fieldname": "submitted",
"fieldtype": "Date",
"read_only": 1
},
{
"label": _("Payment Term"),
"fieldname": "payment_term",
"fieldtype": "Data",
"read_only": 1
},
{
"label": _("Description"),
"fieldname": "description",
"fieldtype": "Data",
"read_only": 1
},
{
"label": _("Due Date"),
"fieldname": "due_date",
"fieldtype": "Date",
"read_only": 1
},
{
"label": _("Invoice Portion"),
"fieldname": "invoice_portion",
"fieldtype": "Percent",
"read_only": 1,
},
{
"label": _("Payment Amount"),
"fieldname": "payment_amount",
"fieldtype": "Currency",
"read_only": 1,
},
{
"label": _("Paid Amount"),
"fieldname": "paid_amount",
"fieldtype": "Currency",
"read_only": 1
},
{
"label": _("Invoices"),
"fieldname": "invoices",
"fieldtype": "Link",
"options": "Sales Invoice",
"read_only": 1,
},
{
"label": _("Status"),
"fieldname": "status",
"fieldtype": "Data",
"read_only": 1
}
]
return columns
|
def get_columns():
columns = [
{
"label": _("Sales Order"),
"fieldname": "name",
"fieldtype": "Link",
"options": "Sales Order",
"read_only": 1,
},
{
"label": _("Posting Date"),
"fieldname": "submitted",
"fieldtype": "Date",
"read_only": 1
},
{
"label": _("Payment Term"),
"fieldname": "payment_term",
"fieldtype": "Data",
"read_only": 1
},
{
"label": _("Description"),
"fieldname": "description",
"fieldtype": "Data",
"read_only": 1
},
{
"label": _("Due Date"),
"fieldname": "due_date",
"fieldtype": "Date",
"read_only": 1
},
{
"label": _("Invoice Portion"),
"fieldname": "invoice_portion",
"fieldtype": "Percent",
"read_only": 1,
},
{
"label": _("Payment Amount"),
"fieldname": "payment_amount",
"fieldtype": "Currency",
"read_only": 1,
},
{
"label": _("Paid Amount"),
"fieldname": "paid_amount",
"fieldtype": "Currency",
"read_only": 1
},
{
"label": _("Invoices"),
"fieldname": "invoices",
"fieldtype": "Link",
"options": "Sales Invoice",
"read_only": 1,
},
{
"label": _("Status"),
"fieldname": "status",
"fieldtype": "Data",
"read_only": 1
}
]
return columns
|
30,817 |
def enable_user_command(client, args):
"""
Enable user using PATCH to Servicenow API , if Connection to the service is successful.
Args: demisto command line argument
client: Service Client
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
scim = verify_and_load_scim_data(args.get('scim'))
parsed_scim_data = map_scim(scim)
user_id = parsed_scim_data.get('id')
if not (user_id):
raise Exception('You must provide sys id of the user')
custom_mapping = demisto.params().get('customMappingUpdateUser')
servicenow_user = client.build_servicenow_user_profile(args, scim, custom_mapping)
servicenow_user['active'] = True
servicenow_user['locked_out'] = False
res = client.update_user(user_id, servicenow_user)
res_json = res.json()
if res.status_code == 200:
result = res_json['result']
active = True if result['active'] == 'true' else False
id = result['sys_id']
email = result['email']
username = result['user_name']
generic_iam_context = OutputContext(success=True, iden=id, email=email,
username=username, details=result, active=active)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code,
errorMessage=res_json.get('error', {}).get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Enable ServiceNow User:', t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True
)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def enable_user_command(client, args):
"""
Enable user using PATCH to the ServiceNow API, if the connection to the service is successful.
Args: demisto command line argument
client: Service Client
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCode, errorMessage, details
"""
scim = verify_and_load_scim_data(args.get('scim'))
parsed_scim_data = map_scim(scim)
user_id = parsed_scim_data.get('id')
if not user_id:
raise Exception('You must provide sys id of the user')
custom_mapping = demisto.params().get('customMappingUpdateUser')
servicenow_user = client.build_servicenow_user_profile(args, scim, custom_mapping)
servicenow_user['active'] = True
servicenow_user['locked_out'] = False
res = client.update_user(user_id, servicenow_user)
res_json = res.json()
if res.status_code == 200:
result = res_json['result']
active = True if result['active'] == 'true' else False
id = result['sys_id']
email = result['email']
username = result['user_name']
generic_iam_context = OutputContext(success=True, iden=id, email=email,
username=username, details=result, active=active)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code,
errorMessage=res_json.get('error', {}).get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Enable ServiceNow User:', t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True
)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
45,683 |
def Clustergram(
data=None,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
column_labels=None,
hide_labels=None,
standardize='none',
cluster='all',
row_dist='euclidean',
col_dist='euclidean',
dist_fun=scs.distance.pdist,
link_fun=lambda x, **kwargs: sch.linkage(x, 'complete', **kwargs),
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
symmetric_value=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color='rgba(0,0,0,0)',
plot_bg_color='rgba(0,0,0,0)',
height=500,
width=500
):
"""Function that returns a Dash Bio Clustergram object.
Keyword arguments:
- data (ndarray; required): Matrix of observations as array of arrays
- generate_curves_dict (bool; default false): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
if one wishes to capture the cluster number that is clicked.)
- return_computed_traces (bool; default false): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels)
- column_labels (list; optional): List of column category labels
(observation labels)
- hide_labels (list; optional): List of labels not to display on the
final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data to be
clustered along columns, then clustered along rows of
row-clustered data.
- row_dist (string; default 'euclidean'): String specifying the
distance metric for rows. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- col_dist (string; default 'euclidean'): String specifying the
distance metric for columns. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see
scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default false): Enabling/disabling of the
option to determine leaf order that maximizes similarity between
neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): The colorscale for the
heatmap. Each list element contains two elements; the first
element refers to the portion of the maximum data point under
which a cell will be colored, and the second element refers to the
color. e.g., a colorscale [[0.0, 'white'], [0.5, 'gray'], [1.0,
'black']] mean that for all cells with a value in the 50th or
lower percentile of the dataset, the color on the heatmap would be
white; all cells with a value in the 50th or higher percentile,
excluding the 100th percentile, would be gray; and the cell(s) in
the 100th percentile would be colored black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension. The keys are: 'row' (for row clusters),
'col' (for column clusters), and 'bg' (for all traces above the
clustering threshold for both row and column.
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- symmetric_value (bool; default true): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default false): Whether or not to transforms
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0). 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
that it should happen along rows (see: https://scikit
-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
- row_group_marker (list; optional) A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
- paper_bg_color (string; default 'rgba(0,0,0,0)`): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if hide_labels is None:
hide_labels = []
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop('return_computed_traces')
kwargs.pop('computed_traces')
kwargs.pop('generate_curves_dict')
(fig, ct, curves_dict) = _Clustergram(
**kwargs
).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
def Clustergram(
data=None,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
column_labels=None,
hide_labels=None,
standardize='none',
cluster='all',
row_dist='euclidean',
col_dist='euclidean',
dist_fun=scs.distance.pdist,
link_fun=lambda x, **kwargs: sch.linkage(x, 'complete', **kwargs),
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
symmetric_value=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color='rgba(0,0,0,0)',
plot_bg_color='rgba(0,0,0,0)',
height=500,
width=500
):
"""Function that returns a Dash Bio Clustergram object.
Keyword arguments:
- data (ndarray; required): Matrix of observations as array of arrays
- generate_curves_dict (bool; default false): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
if one wishes to capture the cluster number that is clicked.)
- return_computed_traces (bool; default false): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels)
- column_labels (list; optional): List of column category labels
(observation labels)
- hide_labels (list; optional): List of labels not to display on the
final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data to be
clustered along columns, then clustered along rows of
row-clustered data.
- row_dist (string; default 'euclidean'): String specifying the
distance metric for rows. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- col_dist (string; default 'euclidean'): String specifying the
distance metric for columns. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see
scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default false): Enabling/disabling of the
option to determine leaf order that maximizes similarity between
neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): The colorscale for the
heatmap. Each list element contains two elements; the first
element refers to the portion of the maximum data point under
which a cell will be colored, and the second element refers to the
color, e.g., a colorscale [[0.0, 'white'], [0.5, 'gray'], [1.0,
'black']] mean that for all cells with a value in the 50th or
lower percentile of the dataset, the color on the heatmap would be
white; all cells with a value in the 50th or higher percentile,
excluding the 100th percentile, would be gray; and the cell(s) in
the 100th percentile would be colored black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension. The keys are: 'row' (for row clusters),
'col' (for column clusters), and 'bg' (for all traces above the
clustering threshold for both row and column).
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- symmetric_value (bool; default true): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default false): Whether or not to transform
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0). 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
that it should happen along rows (see: https://scikit
-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
- row_group_marker (list; optional): A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
- paper_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if hide_labels is None:
hide_labels = []
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop('return_computed_traces')
kwargs.pop('computed_traces')
kwargs.pop('generate_curves_dict')
(fig, ct, curves_dict) = _Clustergram(
**kwargs
).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
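A hedged usage sketch: the keyword names follow the docstring above, and the figure that comes back is a plotly graph_objs Figure that can be shown directly or embedded in a dcc.Graph; the dash_bio import path is an assumption about the published package.
import numpy as np
import dash_bio

data = np.random.default_rng(0).normal(size=(6, 4))
fig = dash_bio.Clustergram(
    data=data,
    row_labels=[f'gene {i}' for i in range(6)],
    column_labels=[f'sample {j}' for j in range(4)],
    color_threshold={'row': 1.0, 'col': 1.0},
    height=400,
    width=500,
)
fig.show()  # or dcc.Graph(figure=fig) inside a Dash layout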
42,470 |
def normalize_invisible_parens(
node: Node, parens_after: Set[str], *, preview: bool
) -> None:
"""Make existing optional parentheses invisible or create new ones.
`parens_after` is a set of string leaf values immediately after which parens
should be put.
Standardizes on visible parentheses for single-element tuples, and keeps
existing visible parentheses for other tuples and generator expressions.
"""
for pc in list_comments(node.prefix, is_endmarker=False, preview=preview):
if pc.value in FMT_OFF:
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
check_lpar = False
for index, child in enumerate(list(node.children)):
# Fixes a bug where invisible parens are not properly stripped from
# assignment statements that contain type annotations.
if isinstance(child, Node) and child.type == syms.annassign:
normalize_invisible_parens(
child, parens_after=parens_after, preview=preview
)
# Add parentheses around long tuple unpacking in assignments.
if (
index == 0
and isinstance(child, Node)
and child.type == syms.testlist_star_expr
):
check_lpar = True
if check_lpar:
if child.type == syms.atom:
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
preview=preview,
):
wrap_in_parentheses(node, child, visible=False)
elif (
preview
and isinstance(child, Node)
and child.type == syms.asexpr_test
and not any(leaf.type == token.COLONEQUAL for leaf in child.leaves())
):
# make parentheses invisible,
# unless the asexpr contains an assignment expression.
if maybe_make_parens_invisible_in_atom(
child.children[0],
parent=child,
preview=preview,
):
wrap_in_parentheses(child, child.children[0], visible=False)
elif is_one_tuple(child):
wrap_in_parentheses(node, child, visible=True)
elif node.type == syms.import_from:
# "import from" nodes store parentheses directly as part of
# the statement
if is_lpar_token(child):
assert is_rpar_token(node.children[-1])
# make parentheses invisible
child.value = ""
node.children[-1].value = ""
elif child.type != token.STAR:
# insert invisible parentheses
node.insert_child(index, Leaf(token.LPAR, ""))
node.append_child(Leaf(token.RPAR, ""))
break
elif not (isinstance(child, Leaf) and is_multiline_string(child)):
wrap_in_parentheses(node, child, visible=False)
comma_check = child.type == token.COMMA if preview else False
check_lpar = (
isinstance(child, Leaf) and child.value in parens_after or comma_check
)
|
def normalize_invisible_parens(
node: Node, parens_after: Set[str], *, preview: bool
) -> None:
"""Make existing optional parentheses invisible or create new ones.
`parens_after` is a set of string leaf values immediately after which parens
should be put.
Standardizes on visible parentheses for single-element tuples, and keeps
existing visible parentheses for other tuples and generator expressions.
"""
for pc in list_comments(node.prefix, is_endmarker=False, preview=preview):
if pc.value in FMT_OFF:
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
check_lpar = False
for index, child in enumerate(list(node.children)):
# Fixes a bug where invisible parens are not properly stripped from
# assignment statements that contain type annotations.
if isinstance(child, Node) and child.type == syms.annassign:
normalize_invisible_parens(
child, parens_after=parens_after, preview=preview
)
# Add parentheses around long tuple unpacking in assignments.
if (
index == 0
and isinstance(child, Node)
and child.type == syms.testlist_star_expr
):
check_lpar = True
if check_lpar:
if child.type == syms.atom:
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
preview=preview,
):
wrap_in_parentheses(node, child, visible=False)
elif (
preview
and isinstance(child, Node)
and child.type == syms.asexpr_test
and not any(leaf.type == token.COLONEQUAL for leaf in child.leaves())
):
# make parentheses invisible,
# unless the asexpr contains an assignment expression.
if maybe_make_parens_invisible_in_atom(
child.children[0],
parent=child,
preview=preview,
):
wrap_in_parentheses(child, child.children[0], visible=False)
elif is_one_tuple(child):
wrap_in_parentheses(node, child, visible=True)
elif node.type == syms.import_from:
# "import from" nodes store parentheses directly as part of
# the statement
if is_lpar_token(child):
assert is_rpar_token(node.children[-1])
# make parentheses invisible
child.value = ""
node.children[-1].value = ""
elif child.type != token.STAR:
# insert invisible parentheses
node.insert_child(index, Leaf(token.LPAR, ""))
node.append_child(Leaf(token.RPAR, ""))
break
elif not (isinstance(child, Leaf) and is_multiline_string(child)):
wrap_in_parentheses(node, child, visible=False)
comma_check = child.type == token.COMMA if preview else False
check_lpar = (
child.value in parens_after if isinstance(child, Leaf) else comma_check
)
|
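A toy illustration, outside of Black, of why the final expression was rewritten: with `A and B or C` the fallback C is still consulted when A is true but B is false, whereas the conditional form only falls back to C when A is false.
def old_form(is_leaf, in_parens_after, comma_check):
    return is_leaf and in_parens_after or comma_check

def new_form(is_leaf, in_parens_after, comma_check):
    return in_parens_after if is_leaf else comma_check

case = (True, False, True)  # a Leaf whose value is not in parens_after, right after a comma
print(old_form(*case), new_form(*case))  # True False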
35,965 |
def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either of both kind of connected nodes can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, all parent workflows are also
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. The same disclaimer as for call_calc_forward applies here as well.
:param bool dry_run:
Do not delete, a dry run, with statistics printed according to verbosity levels.
:param bool force:
Do not ask for confirmation to delete nodes.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
if force:
pass
else:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('I have finished node deletion and I am starting folder deletion.')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('I have finished folder deletion. Deletion completed.')
|
def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either kind of connected node can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, all parent workflows are also
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
This will also delete all workflows called by any workflow that is going to
be deleted. The same disclaimer as for call_calc_forward applies here as well.
:param bool dry_run:
Do not delete, a dry run, with statistics printed according to verbosity levels.
:param bool force:
Do not ask for confirmation to delete nodes.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
if force:
pass
else:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('I have finished node deletion and I am starting folder deletion.')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('I have finished folder deletion. Deletion completed.')
|
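Both versions of `delete_nodes` above expand the starting set by repeatedly following provenance links until nothing new is found. A self-contained sketch of that fixed-point traversal, with a made-up edge dictionary standing in for the QueryBuilder queries:

def closure(seed, edges):
    """Return every node reachable from `seed` by following `edges` (node -> set of neighbours)."""
    accumulator = set(seed)
    operational = set(seed)
    while operational:
        new_nodes = set()
        for node in operational:
            new_nodes |= edges.get(node, set()) - accumulator
        operational = new_nodes - accumulator
        accumulator |= new_nodes
    return accumulator

# toy provenance graph: data 1 -> calc 2 -> data 3 -> calc 4
print(closure({1}, {1: {2}, 2: {3}, 3: {4}}))  # {1, 2, 3, 4}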
34,167 |
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between markdown and json.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data in training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data according to the specified "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between markdown and json.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data into training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data according to the specified "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
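The `add_subparser` pair above is plain nested `argparse` wiring; the only textual change is the "in"/"into" fix in the split help string. A small, Rasa-independent sketch of the same nesting pattern (the command names here are invented for illustration):

import argparse

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(dest="command")

data = subparsers.add_parser("data", help="Data utilities.")
data_sub = data.add_subparsers(dest="subcommand")

split = data_sub.add_parser("split", help="Splits data into training and test sets.")
split.add_argument("--fraction", type=float, default=0.8)

args = parser.parse_args(["data", "split", "--fraction", "0.7"])
print(args.command, args.subcommand, args.fraction)  # data split 0.7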
18,397 |
def view_copy(src, dst, view, spec):
shutil.copyfile(src, dst)
if spec:
# Not metadata, we have to relocate it
# What type of file are we relocating
relocate_method = spack.relocate.relocate_text_bin \
if spack.relocate.is_binary(dst) else spack.relocate.relocate_text
# Get information on where to relocate from/to
prefix_to_prefix = dict(
(dep.prefix, view.get_projection_for_spec(dep))
for dep in spec.traverse()
)
# Call actual relocation method
relocate_method(
[dst], spack.store.layout.root, view._root,
spec.prefix, view.get_projection_for_spec(spec),
spack.paths.spack_root, view._root, prefix_to_prefix
)
|
def view_copy(src, dst, view, spec):
shutil.copyfile(src, dst)
if spec:
# Not metadata, we have to relocate it
# What type of file are we relocating
relocate_method = spack.relocate.relocate_text_bin \
if spack.relocate.is_binary(dst) else spack.relocate.relocate_text
# Get information on where to relocate from/to
prefix_to_projection = dict(
(dep.prefix, view.get_projection_for_spec(dep))
for dep in spec.traverse()
)
# Call actual relocation method
relocate_method(
[dst], spack.store.layout.root, view._root,
spec.prefix, view.get_projection_for_spec(spec),
spack.paths.spack_root, view._root, prefix_to_projection
)
|
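The change above is essentially a rename of the prefix mapping handed to the relocation routine. A hedged, self-contained illustration of what "relocating text" means here, rewriting install prefixes to their view projections; the paths and the naive `str.replace` are illustrative only and not Spack's implementation:

def relocate_text(text, prefix_to_projection):
    """Rewrite every occurrence of an install prefix with its view projection."""
    for old_prefix, new_prefix in prefix_to_projection.items():
        text = text.replace(old_prefix, new_prefix)
    return text

mapping = {"/opt/spack/store/zlib-1.2.11-abc123": "/opt/view"}
script = "#!/bin/sh\nexport LD_LIBRARY_PATH=/opt/spack/store/zlib-1.2.11-abc123/lib\n"
print(relocate_text(script, mapping))  # the store prefix is replaced by /opt/view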
46,211 |
def force_name_unique(name, names):
if names.count(name) == 0:
return name
else:
return force_name_unique(inc_name_count(name), names)
|
def force_name_unique(name, names):
if name not in names:
return name
else:
return force_name_unique(inc_name_count(name), names)
|
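The fix above replaces `names.count(name) == 0` with the equivalent but cheaper `name not in names`. `inc_name_count` is not part of the record, so the version below is a plausible stand-in used only to make the sketch runnable:

import re

def inc_name_count(name):
    """Append or increment a trailing counter: 'layer' -> 'layer1', 'layer1' -> 'layer2'."""
    match = re.search(r"(\d+)$", name)
    if match:
        return name[: match.start()] + str(int(match.group(1)) + 1)
    return name + "1"

def force_name_unique(name, names):
    if name not in names:
        return name
    return force_name_unique(inc_name_count(name), names)

print(force_name_unique("layer", ["layer", "layer1"]))  # layer2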
13,080 |
def test_update_order_display_gross_prices_use_default_tax_settings(order):
# given
tax_config = order.channel.tax_configuration
tax_config.display_gross_prices = True
tax_config.save()
tax_config.country_exceptions.all().delete()
order.display_gross_prices = False
order.save()
# when
update_order_display_gross_prices(order)
# then
assert order.display_gross_prices
|
def test_update_order_display_gross_prices_use_default_tax_settings(order):
# given
tax_config = order.channel.tax_configuration
tax_config.display_gross_prices = True
tax_config.save()
tax_config.country_exceptions.all().delete()
order.display_gross_prices = False
order.save(update_fields=["display_gross_prices"])
# when
update_order_display_gross_prices(order)
# then
assert order.display_gross_prices
|
49,881 |
def test__prepare_temperature_arrays_weather(sapm_dc_snl_ac_system_same_arrays,
location, weather,
total_irrad):
data = weather.copy()
data[['poa_global', 'poa_direct', 'poa_diffuse']] = total_irrad
data_two = data.copy()
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location,
aoi_model='no_loss', spectral_model='no_loss')
# prepare_temperature expects mc.total_irrad and mc.results.weather
# to be set
mc._assign_weather((data, data_two))
mc._assign_total_irrad((data, data_two))
mc._prepare_temperature((data, data_two))
expected = pd.Series([48.928025, 38.080016], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
assert_series_equal(mc.results.cell_temperature[1], expected)
data['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
expected = pd.Series([42.4, 31.5], index=data.index)
assert (mc.results.cell_temperature[1] != expected).all()
assert_series_equal(mc.results.cell_temperature[0], expected)
data['cell_temperature'] = [50., 35.]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
assert_series_equal(mc.results.cell_temperature[1], expected)
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['cell_temperature'] = [10.0, 20.0]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[1], data_two['cell_temperature'])
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
|
def test__prepare_temperature_arrays_weather(sapm_dc_snl_ac_system_same_arrays,
location, weather,
total_irrad):
data = weather.copy()
data[['poa_global', 'poa_direct', 'poa_diffuse']] = total_irrad
data_two = data.copy()
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location,
aoi_model='no_loss', spectral_model='no_loss')
# prepare_temperature expects mc.results.total_irrad and mc.results.weather
# to be set
mc._assign_weather((data, data_two))
mc._assign_total_irrad((data, data_two))
mc._prepare_temperature((data, data_two))
expected = pd.Series([48.928025, 38.080016], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
assert_series_equal(mc.results.cell_temperature[1], expected)
data['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
expected = pd.Series([42.4, 31.5], index=data.index)
assert (mc.results.cell_temperature[1] != expected).all()
assert_series_equal(mc.results.cell_temperature[0], expected)
data['cell_temperature'] = [50., 35.]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
assert_series_equal(mc.results.cell_temperature[1], expected)
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['cell_temperature'] = [10.0, 20.0]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[1], data_two['cell_temperature'])
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
|
796 |
def Gumbel(name, beta, mu, **kwargs):
r"""
Create a Continuous Random Variable with Gumbel distribution.
The density of the Gumbel distribution is given by,
For Maximum
.. math::
f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta}
- \exp \left( -\dfrac{x - \mu}{\beta} \right) \right)
with :math:`x \in [ - \infty, \infty ]`.
For Minimum
.. math::
f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}}
+ \frac{- \mu + x}{\beta}}}{\beta}
with :math:`x \in [ - \infty, \infty ]`.
Parameters
==========
mu: Real number, 'mu' is a location
beta: Real number, 'beta > 0' is a scale
for_min: Boolean, optional, False by default
For enabling the minimum distribution
Returns
==========
A RandomSymbol
Examples
==========
>>> from sympy.stats import Gumbel, density, E, variance, cdf
>>> from sympy import Symbol, simplify, pprint
>>> x = Symbol("x")
>>> y = Symbol("y")
>>> mu = Symbol("mu")
>>> beta = Symbol("beta", positive=True)
>>> X = Gumbel("x", beta, mu)
>>> Y = Gumbel("y", beta, mu, for_min=True)
>>> density(X)(x)
exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta
>>> cdf(X)(x)
exp(-exp(-(-mu + x)/beta))
>>> density(Y)(y)
exp(-exp((-mu + y)/beta) + (-mu + y)/beta)/beta
References
==========
.. [1] http://mathworld.wolfram.com/GumbelDistribution.html
.. [2] https://en.wikipedia.org/wiki/Gumbel_distribution
.. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html
.. [4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html
"""
if kwargs.get('for_min', False):
return rv(name, GumbelDistributionMinimum, (beta, mu))
return rv(name, GumbelDistributionMaximum, (beta, mu))
|
def Gumbel(name, beta, mu, **kwargs):
r"""
Create a Continuous Random Variable with Gumbel distribution.
The density of the Gumbel distribution is given by,
For Maximum
.. math::
f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta}
- \exp \left( -\dfrac{x - \mu}{\beta} \right) \right)
with :math:`x \in [ - \infty, \infty ]`.
For Minimum
.. math::
f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}}
+ \frac{- \mu + x}{\beta}}}{\beta}
with :math:`x \in [ - \infty, \infty ]`.
Parameters
==========
mu: Real number, 'mu' is a location
beta: Real number, 'beta > 0' is a scale
for_min: Boolean, optional, False by default
For enabling the minimum distribution
Returns
==========
A RandomSymbol
Examples
==========
>>> from sympy.stats import Gumbel, density, E, variance, cdf
>>> from sympy import Symbol, simplify, pprint
>>> x = Symbol("x")
>>> y = Symbol("y")
>>> mu = Symbol("mu")
>>> beta = Symbol("beta", positive=True)
>>> X = Gumbel("x", beta, mu)
>>> Y = Gumbel("y", beta, mu, for_min=True)
>>> density(X)(x)
exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta
>>> cdf(X)(x)
exp(-exp(-(-mu + x)/beta))
>>> density(Y)(y)
exp(-exp((-mu + y)/beta) + (-mu + y)/beta)/beta
References
==========
.. [1] http://mathworld.wolfram.com/GumbelDistribution.html
.. [2] https://en.wikipedia.org/wiki/Gumbel_distribution
.. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html
.. [4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html
"""
if kwargs.get('for_min', False):
return rv(name, GumbelDistributionMinimum, (beta, mu))
return rv(name, GumbelDistributionMaximum, (beta, mu))
|
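As a quick numeric sanity check of the "maximum" Gumbel density quoted in the docstring above, evaluated with plain `math` so it runs without SymPy; at x = mu the density reduces to exp(-1)/beta:

import math

def gumbel_max_pdf(x, beta, mu):
    z = (x - mu) / beta
    return math.exp(-z - math.exp(-z)) / beta

beta, mu = 2.0, 1.0
print(abs(gumbel_max_pdf(mu, beta, mu) - math.exp(-1) / beta) < 1e-12)  # True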
13,404 |
def test_03_verify_the_first_pool_created_with_encrypted_root_dataset_become_the_system_dataset(request, pool_data):
pool_disk = [POST('/disk/get_unused/').json()[0]['name']]
payload = {
'name': 'encrypted',
'encryption': True,
'encryption_options': {
'algorithm': 'AES-128-CCM',
'passphrase': 'my_pool_passphrase',
},
'topology': {
'data': [
{'type': 'STRIPE', 'disks': pool_disk}
],
}
}
results = POST('/pool/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 240)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
pool_data['encrypted'] = job_status['results']['result']
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
assert results.json()['basename'] == 'encrypted/.system', results.textZ
|
def test_03_verify_the_first_pool_created_with_encrypted_root_dataset_become_the_system_dataset(request, pool_data):
pool_disk = [POST('/disk/get_unused/').json()[0]['name']]
payload = {
'name': 'encrypted',
'encryption': True,
'encryption_options': {
'algorithm': 'AES-128-CCM',
'passphrase': 'my_pool_passphrase',
},
'topology': {
'data': [
{'type': 'STRIPE', 'disks': pool_disk}
],
}
}
results = POST('/pool/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 240)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
pool_data['encrypted'] = job_status['results']['result']
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
assert results.json()['basename'] == 'encrypted/.system', results.text
|
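The test above submits a pool-creation job and blocks in `wait_on_job` until it reaches a terminal state. A generic polling helper in that spirit, with a stubbed status function instead of the middleware API (which is not shown in the record):

import time

def wait_on_job(get_state, timeout=240, interval=1):
    """Poll `get_state()` until a terminal state or until `timeout` seconds have passed."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = get_state()
        if state in ("SUCCESS", "FAILED"):
            return {"state": state}
        time.sleep(interval)
    return {"state": "TIMEOUT"}

states = iter(["RUNNING", "RUNNING", "SUCCESS"])
print(wait_on_job(lambda: next(states), timeout=5, interval=0))  # {'state': 'SUCCESS'}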
31,627 |
def update_user_iam(default_base_dn, args, create_if_not_exists, mapper_out, disabled_users_group_cn):
"""Update an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param args: Demisto args.
:param create_if_not_exists: Create the user if it does not exist.
:param mapper_out: Mapping User Profiles to AD users.
:param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
:return: Updated User
"""
assert conn is not None
try:
user_profile = args.get("user-profile")
allow_enable = args.get('allow-enable') == 'true'
old_sam_account_name = ''
old_user_exists = ''
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
# check if the user exists and, if it doesn't, create it
sam_account_name = ad_user.get("samaccountname")
if not sam_account_name:
raise DemistoException("User must have a sAMAccountName, please make sure a mapping "
"exists in \"" + mapper_out + "\" outgoing mapper.")
if not ad_user.get('ou'):
raise DemistoException("User must have an Organizational Unit (OU). Please make sure you've added a "
"transformer script which determines the OU of the user "
"in \"" + mapper_out + "\" outgoing mapper, in the User Profile incident type "
"and schema type, under the \"ou\" field.")
new_ou = ad_user.get("ou")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
user_profile = json.loads(user_profile)
if old_user_data := user_profile.get('olduserdata'):
# if olduserdata exists - the user's email is updated:
old_sam_account_name = get_old_samaccountname(old_user_data, mapper_out)
old_user_exists = check_if_user_exists_by_samaccountname(default_base_dn, old_sam_account_name)
if not user_exists and not old_user_exists and create_if_not_exists:
iam_user_profile = create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn)
elif old_user_exists and user_exists:
# In this case we update the user but using an email that is already in use
raise DemistoException("The sAMAccountName \"" + sam_account_name + "\" already exists."
"Try to update \"" + old_sam_account_name + "\" with a different sAMAccountName.")
elif user_exists or old_user_exists:
# There are 2 options here:
# 1. We update the user, the email stays the same - therefore user_exists=True and old_user_exists=False
# 2. We update the user, the email changes too - therefore user_exists=False and old_user_exists=True
if not old_user_exists:
# In this case the sAMAccountName doesn't change
old_sam_account_name = sam_account_name
dn = user_dn(old_sam_account_name, default_base_dn)
if allow_enable:
enable_user_iam(default_base_dn, dn, disabled_users_group_cn)
# fields that can't be modified
# notice that we are changing the ou and that effects the dn and cn
for field in FIELDS_THAT_CANT_BE_MODIFIED:
if ad_user.get(field):
ad_user.pop(field)
fail_to_modify = []
for key in ad_user:
modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
ou_modified_succeed = modify_user_ou(dn, new_ou)
if not ou_modified_succeed:
fail_to_modify.append("ou")
if manager_email := ad_user.get('manageremail'):
manager_dn = get_user_dn_by_email(manager_email)
modification = {'manager': [('MODIFY_REPLACE', manager_dn)]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
if fail_to_modify:
error_list = '\n'.join(fail_to_modify)
error_message = f"Failed to modify the following attributes: {error_list}"
raise DemistoException(error_message)
else:
active = get_user_activity_by_samaccountname(default_base_dn, sam_account_name)
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
action=IAMActions.UPDATE_USER,
details=ad_user,
active=active)
return iam_user_profile
except Exception as e:
error_code, _ = IAMErrors.BAD_REQUEST
iam_user_profile.set_result(success=False,
error_code=error_code,
error_message=str(e),
action=IAMActions.UPDATE_USER
)
return iam_user_profile
|
def update_user_iam(default_base_dn, args, create_if_not_exists, mapper_out, disabled_users_group_cn):
"""Update an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param args: Demisto args.
:param create_if_not_exists: Create the user if it does not exist.
:param mapper_out: Mapping User Profiles to AD users.
:param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
:return: Updated User
"""
assert conn is not None
try:
user_profile = args.get("user-profile")
allow_enable = args.get('allow-enable') == 'true'
old_sam_account_name = ''
old_user_exists = ''
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
# check if the user exists and, if it doesn't, create it
sam_account_name = ad_user.get("samaccountname")
if not sam_account_name:
raise DemistoException("User must have a sAMAccountName, please make sure a mapping "
"exists in \"" + mapper_out + "\" outgoing mapper.")
if not ad_user.get('ou'):
raise DemistoException("User must have an Organizational Unit (OU). Please make sure you've added a "
"transformer script which determines the OU of the user "
"in \"" + mapper_out + "\" outgoing mapper, in the User Profile incident type "
"and schema type, under the \"ou\" field.")
new_ou = ad_user.get("ou")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
user_profile = json.loads(user_profile)
if old_user_data := user_profile.get('olduserdata'):
# if olduserdata exists - the user's email is updated:
old_sam_account_name = get_old_samaccountname(old_user_data, mapper_out)
old_user_exists = check_if_user_exists_by_samaccountname(default_base_dn, old_sam_account_name)
if not user_exists and not old_user_exists and create_if_not_exists:
iam_user_profile = create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn)
elif old_user_exists and user_exists:
# In this case we update the user but using an email that is already in use
raise DemistoException("The sAMAccountName \"" + sam_account_name + "\" already exists."
"Try to update \"" + old_sam_account_name + "\" with a different sAMAccountName.")
elif user_exists or old_user_exists:
# There are 2 options here:
# 1. We update the user, the email stays the same - therefore user_exists=True and old_user_exists=False
# 2. We update the user, the email changes too - therefore user_exists=False and old_user_exists=True
if not old_user_exists:
# In this case the sAMAccountName doesn't change
old_sam_account_name = sam_account_name
dn = user_dn(old_sam_account_name, default_base_dn)
if allow_enable:
enable_user_iam(default_base_dn, dn, disabled_users_group_cn)
# fields that can't be modified
# notice that we are changing the ou and that effects the dn and cn
for field in FIELDS_THAT_CANT_BE_MODIFIED:
if ad_user.get(field):
ad_user.pop(field)
fail_to_modify = []
for key in ad_user:
modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
ou_modified_succeed = modify_user_ou(dn, new_ou)
if not ou_modified_succeed:
fail_to_modify.append("ou")
if manager_email := ad_user.get('manageremail'):
manager_dn = get_user_dn_by_email(manager_email)
modification = {'manager': [('MODIFY_REPLACE', manager_dn)]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append('manager')
if fail_to_modify:
error_list = '\n'.join(fail_to_modify)
error_message = f"Failed to modify the following attributes: {error_list}"
raise DemistoException(error_message)
else:
active = get_user_activity_by_samaccountname(default_base_dn, sam_account_name)
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
action=IAMActions.UPDATE_USER,
details=ad_user,
active=active)
return iam_user_profile
except Exception as e:
error_code, _ = IAMErrors.BAD_REQUEST
iam_user_profile.set_result(success=False,
error_code=error_code,
error_message=str(e),
action=IAMActions.UPDATE_USER
)
return iam_user_profile
|
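The update path above hinges on four combinations of `user_exists` and `old_user_exists`. A compact, Active-Directory-free restatement of that branch logic as a pure function, purely for illustration:

def decide_action(user_exists, old_user_exists, create_if_not_exists=True):
    if not user_exists and not old_user_exists:
        return "create" if create_if_not_exists else "noop"
    if user_exists and old_user_exists:
        return "conflict"  # the target sAMAccountName is already taken
    return "update"        # exactly one of the two accounts exists

for case in [(False, False), (True, True), (True, False), (False, True)]:
    print(case, decide_action(*case))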
4,650 |
def str_cast_to_int(object, name, value):
""" A function that validates the value is a str and then convert
it to an int using its length.
"""
if not isinstance(value, str):
raise TraitError("Not an string!")
return len(value)
|
def str_cast_to_int(object, name, value):
""" A function that validates the value is a str and then convert
it to an int using its length.
"""
if not isinstance(value, str):
raise TraitError("Not a string!")
return len(value)
|
20,163 |
def filter_aggregate(filters: list, installable: Installable, filter_match_all: bool = True) -> bool:
# if there are no filters, accept it
if not filters:
return True
if filter_match_all:
# if installable matches all filters, accept it
return all((False for filt in filters if not filter_match(filt, installable)))
else:
# if installable matches any filter, accept it
return any((True for filt in filters if filter_match(filt, installable)))
|
def filter_aggregate(filters: list, installable: Installable, filter_match_all: bool = True) -> bool:
# if there are no filters, accept it
if not filters:
return True
if filter_match_all:
# if installable matches all filters, accept it
return all((False for filt in filters if not filter_match(filt, installable)))
else:
# if installable matches any filter, accept it
return any(filter_match(filt, installable) for filt in filters)
|
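The rewrite above turns the `any((True for ... if ...))` construction into a direct generator expression. A quick check of the intended all/any semantics with a stand-in `filter_match` (simple substring matching, invented for the example); note that with no filters `filter_aggregate` accepts everything:

def filter_match(filt, installable):
    return filt in installable

def filter_aggregate(filters, installable, filter_match_all=True):
    if not filters:
        return True
    if filter_match_all:
        return all(filter_match(f, installable) for f in filters)
    return any(filter_match(f, installable) for f in filters)

print(filter_aggregate(["gcc", "10"], "gcc-10.2"))                              # True
print(filter_aggregate(["gcc", "clang"], "gcc-10.2", filter_match_all=False))   # True
print(filter_aggregate(["clang"], "gcc-10.2"))                                  # False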
40,666 |
def create_supervised_evaluator(model, metrics={},
device=None, non_blocking=False,
prepare_batch=_prepare_batch):
"""
Factory function for creating an evaluator for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics.
device (str, optional): device type specification (default: None).
Applies to both model and batches.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
Returns:
:class:`~ignite.engine.Engine`: an evaluator engine with supervised inference function.
"""
if device:
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return y_pred, y
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
|
def create_supervised_evaluator(model, metrics={},
device=None, non_blocking=False,
prepare_batch=_prepare_batch):
"""
Factory function for creating an evaluator for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics.
device (str, optional): device type specification (default: None).
Applies to both model and batches.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
Returns:
Engine: an evaluator engine with supervised inference function.
"""
if device:
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return y_pred, y
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
|
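A minimal usage sketch for the evaluator factory above, assuming the record corresponds to pytorch-ignite's public `create_supervised_evaluator` and that `torch` and `pytorch-ignite` are installed; the linear model and random batches are toy stand-ins:

import torch
from ignite.engine import create_supervised_evaluator
from ignite.metrics import MeanSquaredError

model = torch.nn.Linear(3, 1)
data = [(torch.randn(4, 3), torch.randn(4, 1)) for _ in range(2)]

evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
state = evaluator.run(data)
print(state.metrics["mse"])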
8,841 |
def unblockable(function):
"""Decorate a function to exempt it from the ignore/blocks system.
For example, this can be used to ensure that important events such as
``JOIN`` are always recorded::
from sopel import module
@module.event('JOIN')
@module.unblockable
def on_join_callable(bot, trigger):
# do something when a user JOIN a channel
# a blocked nickname or hostname *will* trigger this
pass
.. seealso::
Sopel's :meth:`~sopel.bot.Sopel.dispatch`.
"""
function.unblockable = True
return function
|
def unblockable(function):
"""Decorate a function to exempt it from the ignore/blocks system.
For example, this can be used to ensure that important events such as
``JOIN`` are always recorded::
from sopel import module
@module.event('JOIN')
@module.unblockable
def on_join_callable(bot, trigger):
# do something when a user JOIN a channel
# a blocked nickname or hostname *will* trigger this
pass
.. seealso::
Sopel's :meth:`~sopel.bot.Sopel.dispatch` method.
"""
function.unblockable = True
return function
|
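The docstring tweak above only touches the Sphinx reference; the decorator itself just tags the callable. A quick check of that behaviour without importing Sopel:

def unblockable(function):
    function.unblockable = True
    return function

@unblockable
def on_join(bot, trigger):
    pass

print(getattr(on_join, "unblockable", False))  # True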
315 |
def sample(
draws=1000,
step=None,
init="auto",
n_init=200000,
start=None,
trace=None,
chain_idx=0,
chains=None,
cores=None,
tune=1000,
progressbar=True,
model=None,
random_seed=None,
discard_tuned_samples=True,
compute_convergence_checks=True,
callback=None,
jitter_max_retries=10,
*,
return_inferencedata=None,
idata_kwargs: dict = None,
mp_ctx=None,
pickle_backend: str = "pickle",
**kwargs,
):
r"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 1000. The number of tuned samples are discarded
by default. See ``discard_tuned_samples``.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto: Choose a default initialization method automatically.
Currently, this is ``jitter+adapt_diag``, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag: Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag: Same as ``adapt_diag``, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag: Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad: Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi: Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map: Use the MAP as starting point. This is discouraged.
* adapt_full: Adapt a dense mass matrix using the sample covariances
step : function or iterable of functions
A step function or collection of functions. If there are variables without step methods,
step methods for those variables will be assigned automatically. By default the NUTS step
method will be used, if appropriate to the model; this is a good default for beginning
users.
n_init : int
Number of iterations of initializer. Only works for 'ADVI' init methods.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to ``trace.point(-1))`` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see ``init`` keyword) can
overwrite the default.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
chain_idx : int
Chain number used to store sample in backend. If ``chains`` is greater than one, chain
numbers will start here.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If ``None``,
then set to either ``cores`` or 2, whichever is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system, but at most 4.
tune : int
Number of iterations to tune, defaults to 1000. Samplers adjust the step sizes, scalings or
similar during tuning. Tuning samples will be drawn in addition to the number specified in
the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to
False.
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in ``with`` context)
random_seed : int or list of ints
A list is accepted if ``cores`` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval.
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
callback : function, default=None
A function which gets called for every sample from the trace of a chain. The function is
called with the trace and the current draw and will contain all samples for a single trace.
the ``draw.chain`` argument can be used to determine which of the active chains the sample
is drawn from.
Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.
jitter_max_retries: int
Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter
that yields a finite probability. This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full``
init methods.
return_inferencedata : bool, default=False
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to `False`, but we'll switch to `True` in an upcoming release.
idata_kwargs : dict, optional
Keyword arguments for :func:`arviz:arviz.from_pymc3`
mp_ctx : multiprocessing.context.BaseContext
A multiprocessing context for parallel sampling. See multiprocessing
documentation for details.
pickle_backend : str
One of `'pickle'` or `'dill'`. The library used to pickle models
in parallel sampling if the multiprocessing context is not of type
`fork`.
Returns
-------
trace : pymc3.backends.base.MultiTrace or arviz.InferenceData
A ``MultiTrace`` or ArviZ ``InferenceData`` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to ``sample`` to be delivered to the
``step_method``\ s used during sampling.
If your model uses only one step method, you can address step method kwargs
directly. In particular, the NUTS step method has several options including:
* target_accept : float in [0, 1]. The step size is tuned such that we
approximate this acceptance rate. Higher values like 0.9 or 0.95 often
work better for problematic posteriors
* max_treedepth : The maximum depth of the trajectory tree
* step_scale : float, default 0.25
The initial guess for the step size scaled down by :math:`1/n**(1/4)`
If your model uses multiple step methods, aka a Compound Step, then you have
two ways to address arguments to each step method:
A. If you let ``sample()`` automatically assign the ``step_method``\ s,
and you can correctly anticipate what they will be, then you can wrap
step method kwargs in a dict and pass that to sample() with a kwarg set
to the name of the step method.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send:
1. ``target_accept`` to NUTS: nuts={'target_accept':0.9}
2. ``transit_p`` to BinaryGibbsMetropolis: binary_gibbs_metropolis={'transit_p':.7}
Note that available names are:
``nuts``, ``hmc``, ``metropolis``, ``binary_metropolis``,
``binary_gibbs_metropolis``, ``categorical_gibbs_metropolis``,
``DEMetropolis``, ``DEMetropolisZ``, ``slice``
B. If you manually declare the ``step_method``\ s, within the ``step``
kwarg, then you can address the ``step_method`` kwargs directly.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send ::
step=[pm.NUTS([freeRV1, freeRV2], target_accept=0.9),
pm.BinaryGibbsMetropolis([freeRV3], transit_p=.7)]
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
In [1]: import pymc3 as pm
...: n = 100
...: h = 61
...: alpha = 2
...: beta = 2
In [2]: with pm.Model() as model: # context management
...: p = pm.Beta("p", alpha=alpha, beta=beta)
...: y = pm.Binomial("y", n=n, p=p, observed=h)
...: trace = pm.sample()
In [3]: pm.summary(trace, kind="stats")
Out[3]:
mean sd hdi_3% hdi_97%
p 0.609 0.047 0.528 0.699
"""
model = modelcontext(model)
if start is None:
check_start_vals(model.test_point, model)
else:
if isinstance(start, dict):
update_start_vals(start, model.test_point, model)
else:
for chain_start_vals in start:
update_start_vals(chain_start_vals, model.test_point, model)
check_start_vals(start, model)
if cores is None:
cores = min(4, _cpu_count())
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")
if not discard_tuned_samples and not return_inferencedata:
warnings.warn(
"Tuning samples will be included in the returned `MultiTrace` object, which can lead to"
" complications in your downstream analysis. Please consider to switch to `InferenceData`:\n"
"`pm.sample(..., return_inferencedata=True)`",
UserWarning,
)
if return_inferencedata is None:
v = packaging.version.parse(pm.__version__)
if v.release[0] > 3 or v.release[1] >= 10: # type: ignore
warnings.warn(
"In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. "
"You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.",
FutureWarning,
)
# set the default
return_inferencedata = False
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError("The model does not contain any free variables.")
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info("Auto-assigning NUTS sampler...")
start_, step = init_nuts(
init=init,
chains=chains,
n_init=n_init,
model=model,
random_seed=random_seed,
progressbar=progressbar,
jitter_max_retries=jitter_max_retries,
**kwargs,
)
if start is None:
start = start_
check_start_vals(start, model)
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. " "Falling back to elementwise auto-assignment.")
_log.debug("Exception in init nuts", exec_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {
"draws": draws,
"step": step,
"start": start,
"trace": trace,
"chain": chain_idx,
"chains": chains,
"tune": tune,
"progressbar": progressbar,
"model": model,
"random_seed": random_seed,
"cores": cores,
"callback": callback,
"discard_tuned_samples": discard_tuned_samples,
}
parallel_args = {
"pickle_backend": pickle_backend,
"mp_ctx": mp_ctx,
}
sample_args.update(kwargs)
has_population_samplers = np.any(
[
isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
parallel = cores > 1 and chains > 1 and not has_population_samplers
t_start = time.time()
if parallel:
_log.info(f"Multiprocess sampling ({chains} chains in {cores} jobs)")
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args, **parallel_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug("Pickling error:", exec_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug("Pickling error:", exec_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
has_demcmc = np.any(
[
isinstance(m, DEMetropolis)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
_log.info(f"Population sampling ({chains} chains)")
if has_demcmc and chains < 3:
raise ValueError(
"DEMetropolis requires at least 3 chains. "
"For this {}-dimensional model you should use ≥{} chains".format(
model.ndim, model.ndim + 1
)
)
if has_demcmc and chains <= model.ndim:
warnings.warn(
"DEMetropolis should be used with more chains than dimensions! "
"(The model has {} dimensions.)".format(model.ndim),
UserWarning,
)
_print_step_hierarchy(step)
trace = _sample_population(parallelize=cores > 1, **sample_args)
else:
_log.info(f"Sequential sampling ({chains} chains in 1 job)")
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
t_sampling = time.time() - t_start
# count the number of tune/draw iterations that happened
# ideally via the "tune" statistic, but not all samplers record it!
if "tune" in trace.stat_names:
stat = trace.get_sampler_stats("tune", chains=0)
# when CompoundStep is used, the stat is 2 dimensional!
if len(stat.shape) == 2:
stat = stat[:, 0]
stat = tuple(stat)
n_tune = stat.count(True)
n_draws = stat.count(False)
else:
# these may be wrong when KeyboardInterrupt happened, but they're better than nothing
n_tune = min(tune, len(trace))
n_draws = max(0, len(trace) - n_tune)
if discard_tuned_samples:
trace = trace[n_tune:]
# save metadata in SamplerReport
trace.report._n_tune = n_tune
trace.report._n_draws = n_draws
trace.report._t_sampling = t_sampling
if "variable_inclusion" in trace.stat_names:
variable_inclusion = np.stack(trace.get_sampler_stats("variable_inclusion")).mean(0)
trace.report.variable_importance = variable_inclusion / variable_inclusion.sum()
n_chains = len(trace.chains)
_log.info(
f'Sampling {n_chains} chain{"s" if n_chains > 1 else ""} for {n_tune:_d} tune and {n_draws:_d} draw iterations '
f"({n_tune*n_chains:_d} + {n_draws*n_chains:_d} draws total) "
f"took {trace.report.t_sampling:.0f} seconds."
)
idata = None
if compute_convergence_checks or return_inferencedata:
ikwargs = dict(model=model, save_warmup=not discard_tuned_samples)
if idata_kwargs:
ikwargs.update(idata_kwargs)
idata = arviz.from_pymc3(trace, **ikwargs)
if compute_convergence_checks:
if draws - tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(idata, model)
trace.report._log_summary()
if return_inferencedata:
return idata
else:
return trace
|
def sample(
draws=1000,
step=None,
init="auto",
n_init=200000,
start=None,
trace=None,
chain_idx=0,
chains=None,
cores=None,
tune=1000,
progressbar=True,
model=None,
random_seed=None,
discard_tuned_samples=True,
compute_convergence_checks=True,
callback=None,
jitter_max_retries=10,
*,
return_inferencedata=None,
idata_kwargs: dict = None,
mp_ctx=None,
pickle_backend: str = "pickle",
**kwargs,
):
r"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 1000. The number of tuned samples are discarded
by default. See ``discard_tuned_samples``.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto: Choose a default initialization method automatically.
Currently, this is ``jitter+adapt_diag``, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag: Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag: Same as ``adapt_diag``, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag: Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad: Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi: Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map: Use the MAP as starting point. This is discouraged.
* adapt_full: Adapt a dense mass matrix using the sample covariances
step : function or iterable of functions
A step function or collection of functions. If there are variables without step methods,
step methods for those variables will be assigned automatically. By default the NUTS step
method will be used, if appropriate to the model; this is a good default for beginning
users.
n_init : int
Number of iterations of initializer. Only works for 'ADVI' init methods.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to ``trace.point(-1))`` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see ``init`` keyword) can
overwrite the default.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
chain_idx : int
Chain number used to store sample in backend. If ``chains`` is greater than one, chain
numbers will start here.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If ``None``,
then set to either ``cores`` or 2, whichever is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system, but at most 4.
tune : int
Number of iterations to tune, defaults to 1000. Samplers adjust the step sizes, scalings or
similar during tuning. Tuning samples will be drawn in addition to the number specified in
the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to
False.
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in ``with`` context)
random_seed : int or list of ints
A list is accepted if ``cores`` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval.
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
callback : function, default=None
A function which gets called for every sample from the trace of a chain. The function is
called with the trace and the current draw and will contain all samples for a single trace.
The ``draw.chain`` argument can be used to determine which of the active chains the sample
is drawn from.
Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.
jitter_max_retries : int
Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter
that yields a finite probability. This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full``
init methods.
return_inferencedata : bool, default=False
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False).
Defaults to `False`, but we'll switch to `True` in an upcoming release.
idata_kwargs : dict, optional
Keyword arguments for :func:`arviz:arviz.from_pymc3`
mp_ctx : multiprocessing.context.BaseContext
A multiprocessing context for parallel sampling. See multiprocessing
documentation for details.
pickle_backend : str
One of `'pickle'` or `'dill'`. The library used to pickle models
in parallel sampling if the multiprocessing context is not of type
`fork`.
Returns
-------
trace : pymc3.backends.base.MultiTrace or arviz.InferenceData
A ``MultiTrace`` or ArviZ ``InferenceData`` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to ``sample`` to be delivered to the
``step_method``\ s used during sampling.
If your model uses only one step method, you can address step method kwargs
directly. In particular, the NUTS step method has several options including:
* target_accept : float in [0, 1]. The step size is tuned such that we
approximate this acceptance rate. Higher values like 0.9 or 0.95 often
work better for problematic posteriors
* max_treedepth : The maximum depth of the trajectory tree
* step_scale : float, default 0.25
The initial guess for the step size scaled down by :math:`1/n^{1/4}`
If your model uses multiple step methods, aka a Compound Step, then you have
two ways to address arguments to each step method:
A. If you let ``sample()`` automatically assign the ``step_method``\ s,
and you can correctly anticipate what they will be, then you can wrap
step method kwargs in a dict and pass that to sample() with a kwarg set
to the name of the step method.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send:
1. ``target_accept`` to NUTS: nuts={'target_accept':0.9}
2. ``transit_p`` to BinaryGibbsMetropolis: binary_gibbs_metropolis={'transit_p':.7}
Note that available names are:
``nuts``, ``hmc``, ``metropolis``, ``binary_metropolis``,
``binary_gibbs_metropolis``, ``categorical_gibbs_metropolis``,
``DEMetropolis``, ``DEMetropolisZ``, ``slice``
B. If you manually declare the ``step_method``\ s, within the ``step``
kwarg, then you can address the ``step_method`` kwargs directly.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send ::
step=[pm.NUTS([freeRV1, freeRV2], target_accept=0.9),
pm.BinaryGibbsMetropolis([freeRV3], transit_p=.7)]
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
In [1]: import pymc3 as pm
...: n = 100
...: h = 61
...: alpha = 2
...: beta = 2
In [2]: with pm.Model() as model: # context management
...: p = pm.Beta("p", alpha=alpha, beta=beta)
...: y = pm.Binomial("y", n=n, p=p, observed=h)
...: trace = pm.sample()
In [3]: pm.summary(trace, kind="stats")
Out[3]:
mean sd hdi_3% hdi_97%
p 0.609 0.047 0.528 0.699
"""
model = modelcontext(model)
if start is None:
check_start_vals(model.test_point, model)
else:
if isinstance(start, dict):
update_start_vals(start, model.test_point, model)
else:
for chain_start_vals in start:
update_start_vals(chain_start_vals, model.test_point, model)
check_start_vals(start, model)
if cores is None:
cores = min(4, _cpu_count())
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")
if not discard_tuned_samples and not return_inferencedata:
warnings.warn(
"Tuning samples will be included in the returned `MultiTrace` object, which can lead to"
" complications in your downstream analysis. Please consider to switch to `InferenceData`:\n"
"`pm.sample(..., return_inferencedata=True)`",
UserWarning,
)
if return_inferencedata is None:
v = packaging.version.parse(pm.__version__)
if v.release[0] > 3 or v.release[1] >= 10: # type: ignore
warnings.warn(
"In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. "
"You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.",
FutureWarning,
)
# set the default
return_inferencedata = False
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError("The model does not contain any free variables.")
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info("Auto-assigning NUTS sampler...")
start_, step = init_nuts(
init=init,
chains=chains,
n_init=n_init,
model=model,
random_seed=random_seed,
progressbar=progressbar,
jitter_max_retries=jitter_max_retries,
**kwargs,
)
if start is None:
start = start_
check_start_vals(start, model)
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. " "Falling back to elementwise auto-assignment.")
_log.debug("Exception in init nuts", exec_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {
"draws": draws,
"step": step,
"start": start,
"trace": trace,
"chain": chain_idx,
"chains": chains,
"tune": tune,
"progressbar": progressbar,
"model": model,
"random_seed": random_seed,
"cores": cores,
"callback": callback,
"discard_tuned_samples": discard_tuned_samples,
}
parallel_args = {
"pickle_backend": pickle_backend,
"mp_ctx": mp_ctx,
}
sample_args.update(kwargs)
has_population_samplers = np.any(
[
isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
parallel = cores > 1 and chains > 1 and not has_population_samplers
t_start = time.time()
if parallel:
_log.info(f"Multiprocess sampling ({chains} chains in {cores} jobs)")
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args, **parallel_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug("Pickling error:", exec_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug("Pickling error:", exec_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
has_demcmc = np.any(
[
isinstance(m, DEMetropolis)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
_log.info(f"Population sampling ({chains} chains)")
if has_demcmc and chains < 3:
raise ValueError(
"DEMetropolis requires at least 3 chains. "
"For this {}-dimensional model you should use ≥{} chains".format(
model.ndim, model.ndim + 1
)
)
if has_demcmc and chains <= model.ndim:
warnings.warn(
"DEMetropolis should be used with more chains than dimensions! "
"(The model has {} dimensions.)".format(model.ndim),
UserWarning,
)
_print_step_hierarchy(step)
trace = _sample_population(parallelize=cores > 1, **sample_args)
else:
_log.info(f"Sequential sampling ({chains} chains in 1 job)")
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
t_sampling = time.time() - t_start
# count the number of tune/draw iterations that happened
# ideally via the "tune" statistic, but not all samplers record it!
if "tune" in trace.stat_names:
stat = trace.get_sampler_stats("tune", chains=0)
# when CompoundStep is used, the stat is 2 dimensional!
if len(stat.shape) == 2:
stat = stat[:, 0]
stat = tuple(stat)
n_tune = stat.count(True)
n_draws = stat.count(False)
else:
# these may be wrong when KeyboardInterrupt happened, but they're better than nothing
n_tune = min(tune, len(trace))
n_draws = max(0, len(trace) - n_tune)
if discard_tuned_samples:
trace = trace[n_tune:]
# save metadata in SamplerReport
trace.report._n_tune = n_tune
trace.report._n_draws = n_draws
trace.report._t_sampling = t_sampling
if "variable_inclusion" in trace.stat_names:
variable_inclusion = np.stack(trace.get_sampler_stats("variable_inclusion")).mean(0)
trace.report.variable_importance = variable_inclusion / variable_inclusion.sum()
n_chains = len(trace.chains)
_log.info(
f'Sampling {n_chains} chain{"s" if n_chains > 1 else ""} for {n_tune:_d} tune and {n_draws:_d} draw iterations '
f"({n_tune*n_chains:_d} + {n_draws*n_chains:_d} draws total) "
f"took {trace.report.t_sampling:.0f} seconds."
)
idata = None
if compute_convergence_checks or return_inferencedata:
ikwargs = dict(model=model, save_warmup=not discard_tuned_samples)
if idata_kwargs:
ikwargs.update(idata_kwargs)
idata = arviz.from_pymc3(trace, **ikwargs)
if compute_convergence_checks:
if draws - tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(idata, model)
trace.report._log_summary()
if return_inferencedata:
return idata
else:
return trace
|
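The docstring above spells out how step-method keyword arguments are routed, both when ``sample()`` auto-assigns steps and when a CompoundStep is declared by hand. A minimal sketch of the two styles, assuming a toy model with one continuous and one binary free variable (the variable names and priors are illustrative, not taken from the source):
import pymc3 as pm

with pm.Model() as toy_model:
    x = pm.Normal("x", mu=0.0, sigma=1.0)      # continuous variable -> NUTS
    z = pm.Bernoulli("z", p=0.5)               # discrete variable -> BinaryGibbsMetropolis

    # Style A: let sample() assign the step methods and route kwargs by step name.
    trace_a = pm.sample(
        1000,
        tune=1000,
        nuts={"target_accept": 0.9},
        binary_gibbs_metropolis={"transit_p": 0.7},
    )

    # Style B: declare the step methods manually and pass their kwargs directly.
    steps = [
        pm.NUTS([x], target_accept=0.9),
        pm.BinaryGibbsMetropolis([z], transit_p=0.7),
    ]
    trace_b = pm.sample(1000, tune=1000, step=steps)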
27,661 |
def _spike_test(stream, percent=0.99, multiplier=1e7):
"""
Check for very large spikes in data and raise an error if found.
:param stream: Stream to look for spikes in.
:type stream: :class:`obspy.core.stream.Stream`
:param percent: Percentage as a decimal to calculate range for.
:type percent: float
:param multiplier: Multiplier of range to define a spike.
:type multiplier: float
"""
list_ids = []
for tr in stream:
if (tr.data > 2 * np.max(np.sort(
np.abs(tr.data))[0:int(percent * len(tr.data))]
) * multiplier).sum() > 0:
list_ids.append(tr.id)
if list_ids != []:
ids = ', '.join(list_ids)
msg = ('Spikes above ' + str(multiplier) +
' of the range of ' + str(percent) +
' of the data present, check:\n' + ids + '.\n'
'This would otherwise likely result in an issue during ' +
'FFT prior to cross-correlation.\n' +
'If you think this spike is real please report ' +
'this as a bug.')
print(msg)
for ID in list_ids:
stream.remove(stream.select(id=ID)[0])
print('%s got removed by EQcorrscan because it had spike' % ID)
|
def _spike_test(stream, percent=0.99, multiplier=1e7):
"""
Check for very large spikes in data and raise an error if found.
:param stream: Stream to look for spikes in.
:type stream: :class:`obspy.core.stream.Stream`
:param percent: Percentage as a decimal to calculate range for.
:type percent: float
:param multiplier: Multiplier of range to define a spike.
:type multiplier: float
"""
list_ids = []
for tr in stream:
if (tr.data > 2 * np.max(np.sort(
np.abs(tr.data))[0:int(percent * len(tr.data))]
) * multiplier).sum() > 0:
list_ids.append(tr.id)
if list_ids != []:
ids = ', '.join(list_ids)
msg = ('Spikes above ' + str(multiplier) +
' of the range of ' + str(percent) +
' of the data present, check:\n' + ids + '.\n'
'This would otherwise likely result in an issue during ' +
'FFT prior to cross-correlation.\n' +
'If you think this spike is real please report ' +
'this as a bug.')
print(msg)
for ID in list_ids:
stream.remove(stream.select(id=ID)[0])
Logger.info(f"{_id} was removed by EQcorrscan because it had spikes")
|
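The spike criterion in `_spike_test` above is a plain array expression and can be exercised without ObsPy; this sketch (synthetic data, not from the source) shows a single extreme sample tripping the threshold while ordinary noise does not:
import numpy as np

def has_spike(data, percent=0.99, multiplier=1e7):
    # Same test as in _spike_test: compare samples against a multiple of the
    # maximum of the lowest `percent` fraction of absolute amplitudes.
    robust_max = np.max(np.sort(np.abs(data))[0:int(percent * len(data))])
    return (data > 2 * robust_max * multiplier).sum() > 0

rng = np.random.default_rng(42)
noise = rng.normal(scale=1.0, size=10_000)
spiked = noise.copy()
spiked[1234] = 1e9                 # one absurdly large sample

print(has_spike(noise))            # False
print(has_spike(spiked))           # True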
784 |
def rumba_deconv_global(data, kernel, mask, n_iter=600, recon_type='smf',
n_coils=1, R=1, use_tv=True, verbose=False):
'''
Fit fODF for a all voxels simultaneously using RUMBA-SD.
Deconvolves the kernel from the diffusion-weighted signal at each voxel by
computing a maximum likelihood estimation of the fODF [1]_. Global fitting
also permits the use of total variation regularization (RUMBA-SD + TV). The
spatial dependence introduced by TV promotes smoother solutions (i.e.
prevents oscillations), while still allowing for sharp discontinuities
[2]_. This promots smoothness and continuity along individual tracts while
preventing smoothing of adjacent tracts.
Generally, global_fit will proceed more quickly than the voxelwise fit
provided that the computer has adequate RAM (>= 16 GB will be more than
sufficient).
Parameters
----------
data : 4d ndarray (x, y, z, N)
Signal values for entire brain. None of the volume dimensions x, y, z
can be 1 if TV regularization is required.
kernel : 2d ndarray (N, M)
Deconvolution kernel mapping volume fractions of the M compartments to
N-length signal. Last two columns should be for GM and CSF.
mask : 3d ndarray(x, y, z)
Binary mask specifying voxels of interest with 1; fODF will only be
fit at these voxels (0 elsewhere).
n_iter : int, optional
Number of iterations for fODF estimation. Must be a positive int.
Default: 600
recon_type : {'smf', 'sos'}, optional
MRI reconstruction method: spatial matched filter (SMF) or
sum-of-squares (SoS). SMF reconstruction generates Rician noise while
SoS reconstruction generates Noncentral Chi noise. Default: 'smf'
n_coils : int, optional
Number of coils in MRI scanner -- only relevant in SoS reconstruction.
Must be a positive int. Default: 1
use_tv : bool, optional
If true, applies total variation regularization. This requires a brain
volume with no singleton dimensions. Default: True
verbose : bool, optional
If true, logs updates on estimated signal-to-noise ratio after each
iteration. Default: False
Returns
-------
fodf : 4d ndarray (x, y, z, M-1)
fODF computed for each voxel.
f_gm : 3d ndarray (x, y, z)
GM volume fraction at each voxel.
f_csf : 3d ndarray (x, y, z)
CSF volume fraction at each voxel.
f_wm : 3d ndarray (x, y, z)
White matter volume fraction at each voxel.
f_iso : 3d ndarray (x, y, z)
Isotropic volume fraction at each voxel (GM + CSF)
combined : 4d ndarray (x, y, z, M-1)
fODF combined with isotropic compartment for each voxel.
Notes
-----
TV modifies our cost function as follows:
$ J(\bold{f}) = -\log{P(\bold{S}|\bold{H}, \bold{f}, \sigma^2, n)}) +
\alpha_{TV}TV(\bold{f}) $
where the first term is the negative log likelihood described in the notes
of `rumba_deconv`, and the second term is the TV energy, or the sum of
gradient absolute values for the fODF across the entire brain. This results
in a new multiplicative factor in the iterative scheme, now becoming:
$ \bold{f}^{k+1} = \bold{f}^k \circ \frac{\bold{H}^T\left[\bold{S}\circ
\frac{I_n(\bold{S}\circ\bold{Hf}^k/\sigma^2)} {I_{n-1}(\bold{S}\circ
\bold{Hf}^k/\sigma^2)} \right ]} {\bold{H}^T\bold{Hf}^k}\circ\bold{R}^k $
where $\bold{R}^k$ is computed voxelwise by:
$ (\bold{R}^k)_j = \frac{1}{1 - \alpha_{TV}div\left(\frac{\triangledown[
\bold{f}^k_{3D}]_j}{\lvert\triangledown[\bold{f}^k_{3D}]_j \rvert}
\right)\biggr\rvert_{x, y, z}} $
Here, $\triangledown$ is the symbol for the 3D gradient at any voxel.
The regularization strength, $\alpha_{TV}$ is updated after each iteration
by the discrepancy principle -- specifically, it is selected to match the
estimated variance after each iteration [3]_.
References
----------
.. [1] Canales-Rodríguez, E. J., Daducci, A., Sotiropoulos, S. N., Caruyer,
E., Aja-Fernández, S., Radua, J., Mendizabal, J. M. Y.,
Iturria-Medina, Y., Melie-García, L., Alemán-Gómez, Y., Thiran,
J.-P., Sarró, S., Pomarol-Clotet, E., & Salvador, R. (2015).
Spherical Deconvolution of Multichannel Diffusion MRI Data with
Non-Gaussian Noise Models and Spatial Regularization. PLOS ONE,
10(10), e0138910. https://doi.org/10.1371/journal.pone.0138910
.. [2] Rudin, L. I., Osher, S., & Fatemi, E. (1992). Nonlinear total
variation based noise removal algorithms. Physica D: Nonlinear
Phenomena, 60(1), 259–268.
https://doi.org/10.1016/0167-2789(92)90242-F
.. [3] Chambolle A. An algorithm for total variation minimization and
applications. Journal of Mathematical Imaging and Vision. 2004;
20:89–97.
'''
# Crop data to reduce memory consumption
dim_orig = data.shape
ixmin, ixmax = bounding_box(mask)
data = crop(data, ixmin, ixmax)
mask = crop(mask, ixmin, ixmax)
if np.any(np.array(data.shape[:3]) == 1) and use_tv:
raise ValueError("Cannot use TV regularization if any spatial" +
"dimensions are 1; " +
f"provided dimensions were {data.shape[:3]}")
epsilon = 1e-7
n_grad = kernel.shape[0] # gradient directions
n_comp = kernel.shape[1] # number of compartments
dim = data.shape
n_v_tot = np.prod(dim[:3]) # total number of voxels
# Initial guess is iso-probable
fodf0 = np.ones((n_comp, 1), dtype=np.float32)
fodf0 = fodf0 / np.sum(fodf0, axis=0)
if recon_type == "smf":
n_order = 1 # Rician noise (same as Noncentral Chi with order 1)
elif recon_type == "sos":
n_order = n_coils # Noncentral Chi noise (order = # of coils)
else:
raise ValueError("Invalid recon_type. Should be 'smf' or 'sos', " +
f"received f{recon_type}")
mask_vec = np.ravel(mask)
# Indices of target voxels
index_mask = np.atleast_1d(np.squeeze(np.argwhere(mask_vec)))
n_v_true = len(index_mask) # number of target voxels
data_2d = np.zeros((n_v_true, n_grad), dtype=np.float32)
for i in range(n_grad):
data_2d[:, i] = np.ravel(data[:, :, :, i])[
index_mask] # only keep voxels of interest
data_2d = data_2d.T
fodf = np.tile(fodf0, (1, n_v_true))
reblurred = np.matmul(kernel, fodf)
# For use later
kernel_t = kernel.T
f_zero = 0
# Initialize algorithm parameters
sigma0 = 1/15
sigma2 = sigma0**2
tv_lambda = sigma2 # initial guess for TV regularization strength
# Expand into matrix form for iterations
sigma2 = sigma2 * np.ones(data_2d.shape, dtype=np.float32)
tv_lambda_aux = np.zeros((n_v_tot), dtype=np.float32)
reblurred_s = data_2d * reblurred / sigma2
for i in range(n_iter):
fodf_i = fodf
ratio = mbessel_ratio(n_order, reblurred_s).astype(np.float32)
rl_factor = np.matmul(kernel_t, data_2d*ratio) / \
(np.matmul(kernel_t, reblurred) + _EPS)
if use_tv: # apply TV regularization
tv_factor = np.ones(fodf_i.shape, dtype=np.float32)
fodf_4d = _reshape_2d_4d(fodf_i.T, mask)
# Compute gradient, divergence
gr = _grad(fodf_4d)
d_inv = 1 / np.sqrt(epsilon**2 + np.sum(gr**2, axis=3))
gr_norm = (gr * d_inv[:, :, :, None, :])
div_f = _divergence(gr_norm)
g0 = np.abs(1 - tv_lambda * div_f)
tv_factor_4d = 1 / (g0 + _EPS)
for j in range(n_comp):
tv_factor_1d = np.ravel(tv_factor_4d[:, :, :, j])[index_mask]
tv_factor[j, :] = tv_factor_1d
# Apply TV regularization to iteration factor
rl_factor = rl_factor * tv_factor
fodf = fodf_i * rl_factor # result of iteration
fodf = np.maximum(f_zero, fodf) # positivity constraint
# Update other variables
reblurred = np.matmul(kernel, fodf)
reblurred_s = data_2d * reblurred / sigma2
# Iterate variance
sigma2_i = (1 / (n_grad * n_order)) * \
np.sum((data_2d**2 + reblurred**2) / 2 - (
sigma2 * reblurred_s) * ratio, axis=0)
sigma2_i = np.minimum((1 / 8)**2, np.maximum(sigma2_i, (1 / 80)**2))
if verbose:
logger.info("Iteration %d of %d", i+1, n_iter)
snr_mean = np.mean(1 / np.sqrt(sigma2_i))
snr_std = np.std(1 / np.sqrt(sigma2_i))
logger.info(
"Mean SNR (S0/sigma) estimated to be %.3f +/- %.3f",
snr_mean, snr_std)
# Expand into matrix
sigma2 = np.tile(sigma2_i[None, :], (data_2d.shape[0], 1))
# Update TV regularization strength using the discrepancy principle
if use_tv:
if R == 1:
tv_lambda = np.mean(sigma2_i)
if tv_lambda < (1/30)**2:
tv_lambda = (1/30)**2
else: # different factor for each voxel
tv_lambda_aux[index_mask] = sigma2_i
tv_lambda = np.reshape(tv_lambda_aux, (*dim[:3], 1))
fodf = fodf.astype(np.float64)
fodf = fodf / (np.sum(fodf, axis=0)[None, ...] + _EPS) # normalize fODF
# Extract compartments
fodf_4d = np.zeros((*dim_orig[:3], n_comp))
_reshape_2d_4d(fodf.T, mask, out=fodf_4d[ixmin[0]:ixmax[0],
ixmin[1]:ixmax[1],
ixmin[2]:ixmax[2]])
fodf = fodf_4d[:, :, :, :-2] # WM compartment
f_gm = fodf_4d[:, :, :, -2] # GM compartment
f_csf = fodf_4d[:, :, :, -1] # CSF compartment
f_wm = np.sum(fodf, axis=3) # white matter volume fraction
combined = fodf + (f_gm[..., None] + f_csf[..., None]) \
/ fodf.shape[3]
f_iso = f_gm + f_csf
return fodf, f_gm, f_csf, f_wm, f_iso, combined
|
def rumba_deconv_global(data, kernel, mask, n_iter=600, recon_type='smf',
n_coils=1, R=1, use_tv=True, verbose=False):
'''
Fit fODF for a all voxels simultaneously using RUMBA-SD.
Deconvolves the kernel from the diffusion-weighted signal at each voxel by
computing a maximum likelihood estimation of the fODF [1]_. Global fitting
also permits the use of total variation regularization (RUMBA-SD + TV). The
spatial dependence introduced by TV promotes smoother solutions (i.e.
prevents oscillations), while still allowing for sharp discontinuities
[2]_. This promotes smoothness and continuity along individual tracts while
preventing smoothing of adjacent tracts.
Generally, global_fit will proceed more quickly than the voxelwise fit
provided that the computer has adequate RAM (>= 16 GB will be more than
sufficient).
Parameters
----------
data : 4d ndarray (x, y, z, N)
Signal values for entire brain. None of the volume dimensions x, y, z
can be 1 if TV regularization is required.
kernel : 2d ndarray (N, M)
Deconvolution kernel mapping volume fractions of the M compartments to
N-length signal. Last two columns should be for GM and CSF.
mask : 3d ndarray(x, y, z)
Binary mask specifying voxels of interest with 1; fODF will only be
fit at these voxels (0 elsewhere).
n_iter : int, optional
Number of iterations for fODF estimation. Must be a positive int.
Default: 600
recon_type : {'smf', 'sos'}, optional
MRI reconstruction method: spatial matched filter (SMF) or
sum-of-squares (SoS). SMF reconstruction generates Rician noise while
SoS reconstruction generates Noncentral Chi noise. Default: 'smf'
n_coils : int, optional
Number of coils in MRI scanner -- only relevant in SoS reconstruction.
Must be a positive int. Default: 1
use_tv : bool, optional
If true, applies total variation regularization. This requires a brain
volume with no singleton dimensions. Default: True
verbose : bool, optional
If true, logs updates on estimated signal-to-noise ratio after each
iteration. Default: False
Returns
-------
fodf : 4d ndarray (x, y, z, M-1)
fODF computed for each voxel.
f_gm : 3d ndarray (x, y, z)
GM volume fraction at each voxel.
f_csf : 3d ndarray (x, y, z)
CSF volume fraction at each voxel.
f_wm : 3d ndarray (x, y, z)
White matter volume fraction at each voxel.
f_iso : 3d ndarray (x, y, z)
Isotropic volume fraction at each voxel (GM + CSF)
combined : 4d ndarray (x, y, z, M-1)
fODF combined with isotropic compartment for each voxel.
Notes
-----
TV modifies our cost function as follows:
$ J(\bold{f}) = -\log{P(\bold{S}|\bold{H}, \bold{f}, \sigma^2, n)}) +
\alpha_{TV}TV(\bold{f}) $
where the first term is the negative log likelihood described in the notes
of `rumba_deconv`, and the second term is the TV energy, or the sum of
gradient absolute values for the fODF across the entire brain. This results
in a new multiplicative factor in the iterative scheme, now becoming:
$ \bold{f}^{k+1} = \bold{f}^k \circ \frac{\bold{H}^T\left[\bold{S}\circ
\frac{I_n(\bold{S}\circ\bold{Hf}^k/\sigma^2)} {I_{n-1}(\bold{S}\circ
\bold{Hf}^k/\sigma^2)} \right ]} {\bold{H}^T\bold{Hf}^k}\circ\bold{R}^k $
where $\bold{R}^k$ is computed voxelwise by:
$ (\bold{R}^k)_j = \frac{1}{1 - \alpha_{TV}div\left(\frac{\triangledown[
\bold{f}^k_{3D}]_j}{\lvert\triangledown[\bold{f}^k_{3D}]_j \rvert}
\right)\biggr\rvert_{x, y, z}} $
Here, $\triangledown$ is the symbol for the 3D gradient at any voxel.
The regularization strength, $\alpha_{TV}$ is updated after each iteration
by the discrepancy principle -- specifically, it is selected to match the
estimated variance after each iteration [3]_.
References
----------
.. [1] Canales-Rodríguez, E. J., Daducci, A., Sotiropoulos, S. N., Caruyer,
E., Aja-Fernández, S., Radua, J., Mendizabal, J. M. Y.,
Iturria-Medina, Y., Melie-García, L., Alemán-Gómez, Y., Thiran,
J.-P., Sarró, S., Pomarol-Clotet, E., & Salvador, R. (2015).
Spherical Deconvolution of Multichannel Diffusion MRI Data with
Non-Gaussian Noise Models and Spatial Regularization. PLOS ONE,
10(10), e0138910. https://doi.org/10.1371/journal.pone.0138910
.. [2] Rudin, L. I., Osher, S., & Fatemi, E. (1992). Nonlinear total
variation based noise removal algorithms. Physica D: Nonlinear
Phenomena, 60(1), 259–268.
https://doi.org/10.1016/0167-2789(92)90242-F
.. [3] Chambolle A. An algorithm for total variation minimization and
applications. Journal of Mathematical Imaging and Vision. 2004;
20:89–97.
'''
# Crop data to reduce memory consumption
dim_orig = data.shape
ixmin, ixmax = bounding_box(mask)
data = crop(data, ixmin, ixmax)
mask = crop(mask, ixmin, ixmax)
if np.any(np.array(data.shape[:3]) == 1) and use_tv:
raise ValueError("Cannot use TV regularization if any spatial" +
"dimensions are 1; " +
f"provided dimensions were {data.shape[:3]}")
epsilon = 1e-7
n_grad = kernel.shape[0] # gradient directions
n_comp = kernel.shape[1] # number of compartments
dim = data.shape
n_v_tot = np.prod(dim[:3]) # total number of voxels
# Initial guess is iso-probable
fodf0 = np.ones((n_comp, 1), dtype=np.float32)
fodf0 = fodf0 / np.sum(fodf0, axis=0)
if recon_type == "smf":
n_order = 1 # Rician noise (same as Noncentral Chi with order 1)
elif recon_type == "sos":
n_order = n_coils # Noncentral Chi noise (order = # of coils)
else:
raise ValueError("Invalid recon_type. Should be 'smf' or 'sos', " +
f"received f{recon_type}")
mask_vec = np.ravel(mask)
# Indices of target voxels
index_mask = np.atleast_1d(np.squeeze(np.argwhere(mask_vec)))
n_v_true = len(index_mask) # number of target voxels
data_2d = np.zeros((n_v_true, n_grad), dtype=np.float32)
for i in range(n_grad):
data_2d[:, i] = np.ravel(data[:, :, :, i])[
index_mask] # only keep voxels of interest
data_2d = data_2d.T
fodf = np.tile(fodf0, (1, n_v_true))
reblurred = np.matmul(kernel, fodf)
# For use later
kernel_t = kernel.T
f_zero = 0
# Initialize algorithm parameters
sigma0 = 1/15
sigma2 = sigma0**2
tv_lambda = sigma2 # initial guess for TV regularization strength
# Expand into matrix form for iterations
sigma2 = sigma2 * np.ones(data_2d.shape, dtype=np.float32)
tv_lambda_aux = np.zeros((n_v_tot), dtype=np.float32)
reblurred_s = data_2d * reblurred / sigma2
for i in range(n_iter):
fodf_i = fodf
ratio = mbessel_ratio(n_order, reblurred_s).astype(np.float32)
rl_factor = np.matmul(kernel_t, data_2d*ratio) / \
(np.matmul(kernel_t, reblurred) + _EPS)
if use_tv: # apply TV regularization
tv_factor = np.ones(fodf_i.shape, dtype=np.float32)
fodf_4d = _reshape_2d_4d(fodf_i.T, mask)
# Compute gradient, divergence
gr = _grad(fodf_4d)
d_inv = 1 / np.sqrt(epsilon**2 + np.sum(gr**2, axis=3))
gr_norm = (gr * d_inv[:, :, :, None, :])
div_f = _divergence(gr_norm)
g0 = np.abs(1 - tv_lambda * div_f)
tv_factor_4d = 1 / (g0 + _EPS)
for j in range(n_comp):
tv_factor_1d = np.ravel(tv_factor_4d[:, :, :, j])[index_mask]
tv_factor[j, :] = tv_factor_1d
# Apply TV regularization to iteration factor
rl_factor = rl_factor * tv_factor
fodf = fodf_i * rl_factor # result of iteration
fodf = np.maximum(f_zero, fodf) # positivity constraint
# Update other variables
reblurred = np.matmul(kernel, fodf)
reblurred_s = data_2d * reblurred / sigma2
# Iterate variance
sigma2_i = (1 / (n_grad * n_order)) * \
np.sum((data_2d**2 + reblurred**2) / 2 - (
sigma2 * reblurred_s) * ratio, axis=0)
sigma2_i = np.minimum((1 / 8)**2, np.maximum(sigma2_i, (1 / 80)**2))
if verbose:
logger.info("Iteration %d of %d", i+1, n_iter)
snr_mean = np.mean(1 / np.sqrt(sigma2_i))
snr_std = np.std(1 / np.sqrt(sigma2_i))
logger.info(
"Mean SNR (S0/sigma) estimated to be %.3f +/- %.3f",
snr_mean, snr_std)
# Expand into matrix
sigma2 = np.tile(sigma2_i[None, :], (data_2d.shape[0], 1))
# Update TV regularization strength using the discrepancy principle
if use_tv:
if R == 1:
tv_lambda = np.mean(sigma2_i)
if tv_lambda < (1/30)**2:
tv_lambda = (1/30)**2
else: # different factor for each voxel
tv_lambda_aux[index_mask] = sigma2_i
tv_lambda = np.reshape(tv_lambda_aux, (*dim[:3], 1))
fodf = fodf.astype(np.float64)
fodf = fodf / (np.sum(fodf, axis=0)[None, ...] + _EPS) # normalize fODF
# Extract compartments
fodf_4d = np.zeros((*dim_orig[:3], n_comp))
_reshape_2d_4d(fodf.T, mask, out=fodf_4d[ixmin[0]:ixmax[0],
ixmin[1]:ixmax[1],
ixmin[2]:ixmax[2]])
fodf = fodf_4d[:, :, :, :-2] # WM compartment
f_gm = fodf_4d[:, :, :, -2] # GM compartment
f_csf = fodf_4d[:, :, :, -1] # CSF compartment
f_wm = np.sum(fodf, axis=3) # white matter volume fraction
combined = fodf + (f_gm[..., None] + f_csf[..., None]) \
/ fodf.shape[3]
f_iso = f_gm + f_csf
return fodf, f_gm, f_csf, f_wm, f_iso, combined
|
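The Notes above give the TV multiplicative factor as R^k = 1 / |1 - alpha_TV * div(grad f / |grad f|)|, which the loop implements through the private `_grad` and `_divergence` helpers. A self-contained sketch of that factor on a synthetic volume, using `np.gradient` in place of dipy's helpers, so the discretisation only approximates the library's:
import numpy as np

def tv_factor(vol, tv_lambda, epsilon=1e-7, eps=1e-12):
    # Gradient of the 3D volume along each axis.
    gx, gy, gz = np.gradient(vol)
    # Normalise the gradient field (epsilon avoids division by zero).
    norm = np.sqrt(epsilon**2 + gx**2 + gy**2 + gz**2)
    gx, gy, gz = gx / norm, gy / norm, gz / norm
    # Divergence of the normalised gradient field.
    div = (np.gradient(gx, axis=0) + np.gradient(gy, axis=1)
           + np.gradient(gz, axis=2))
    # R^k = 1 / |1 - lambda * div|, matching g0 and tv_factor in the loop above.
    return 1.0 / (np.abs(1.0 - tv_lambda * div) + eps)

vol = np.random.rand(8, 8, 8).astype(np.float32)
r_k = tv_factor(vol, tv_lambda=(1 / 15) ** 2)   # the code's initial sigma0**2 guess
print(r_k.shape, float(r_k.min()), float(r_k.max()))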
58,039 |
def rasterize_html_command():
entry_id = demisto.args().get('EntryID')
w = demisto.args().get('width', DEFAULT_W).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
r_type = demisto.args().get('type', 'png')
file_name = demisto.args().get('file_name', 'email')
file_name = f'{file_name}.{"pdf" if r_type.lower() == "pdf" else "png"}' # type: ignore
file_path = demisto.getFilePath(entry_id).get('path')
with open(file_path, 'rb') as f:
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type=r_type)
res = fileResult(filename=file_name, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
|
def rasterize_html_command():
args = demisto.args()
entry_id = args.get('EntryID')
w = args.get('width', DEFAULT_W).rstrip('px')
h = args.get('height', DEFAULT_H).rstrip('px')
r_type = args.get('type', 'png')
file_name = args.get('file_name', 'email')
file_name = f'{file_name}.{"pdf" if r_type.lower() == "pdf" else "png"}' # type: ignore
file_path = demisto.getFilePath(entry_id).get('path')
with open(file_path, 'rb') as f:
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type=r_type)
res = fileResult(filename=file_name, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
|
45,787 |
def rgb_to_grayscale(image: torch.Tensor,
rgb_weights: torch.Tensor = torch.tensor([0.299, 0.587, 0.114])) -> torch.Tensor:
r"""Convert a RGB image to grayscale version of image.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB image to be converted to grayscale with shape :math:`(*,3,H,W)`.
rgb_weights (torch.Tensor): Weights that will be applied on each channel (RGB).
The sum of the weights must add up to one.
Returns:
torch.Tensor: grayscale version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> gray = rgb_to_grayscale(input) # 2x1x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
if not isinstance(rgb_weights, torch.Tensor):
raise TypeError("rgb_weights is not a torch.Tensor. Got {}".format(
type(rgb_weights)))
if len(rgb_weights.shape) != 1 or rgb_weights.shape[0] != 3:
raise ValueError("rgb_weights must have a shape of (3). Got {}"
.format(rgb_weights.shape))
if not torch.isclose(torch.sum(rgb_weights), torch.tensor(1.0)):
raise ValueError("The sum of rgb_weights must be 1. Got {}"
.format(torch.sum(rgb_weights)))
r: torch.Tensor = image[..., 0:1, :, :]
g: torch.Tensor = image[..., 1:2, :, :]
b: torch.Tensor = image[..., 2:3, :, :]
gray: torch.Tensor = rgb_weights[0] * r + rgb_weights[1] * g + rgb_weights[2] * b
return gray
|
def rgb_to_grayscale(image: torch.Tensor,
rgb_weights: torch.Tensor = torch.tensor([0.299, 0.587, 0.114])) -> torch.Tensor:
r"""Convert a RGB image to grayscale version of image.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB image to be converted to grayscale with shape :math:`(*,3,H,W)`.
rgb_weights (torch.Tensor): Weights that will be applied on each channel (RGB).
The sum of the weights must add up to one.
Returns:
torch.Tensor: grayscale version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> gray = rgb_to_grayscale(input) # 2x1x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
if not isinstance(rgb_weights, torch.Tensor):
raise TypeError("rgb_weights is not a torch.Tensor. Got {}".format(
type(rgb_weights)))
if len(rgb_weights.shape) != 1 or rgb_weights.shape[0] != 3:
raise ValueError("rgb_weights must have a shape of (3). Got {}"
.format(rgb_weights.shape))
if not torch.isclose(torch.sum(rgb_weights), torch.tensor(1.0)):
raise ValueError("The sum of rgb_weights must be 1. Got {}"
.format(torch.sum(rgb_weights)))
r: torch.Tensor = image[..., 0:1, :, :]
g: torch.Tensor = image[..., 1:2, :, :]
b: torch.Tensor = image[..., 2:3, :, :]
gray: torch.Tensor = rgb_weights[..., 0] * r + rgb_weights[..., 1] * g + rgb_weights[..., 2] * b
return gray
|
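The conversion above is a per-pixel weighted sum over the channel axis; a small check of that formula with plain tensor operations, using random data and the default Rec. 601-style weights from the signature:
import torch

img = torch.rand(2, 3, 4, 5)                       # (*, 3, H, W)
w = torch.tensor([0.299, 0.587, 0.114])

# Broadcast the weights over the channel dimension and sum it away.
gray_manual = (img * w.view(1, 3, 1, 1)).sum(dim=-3, keepdim=True)
print(gray_manual.shape)                           # torch.Size([2, 1, 4, 5])
# With kornia installed, this should agree with rgb_to_grayscale(img).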
32,193 |
def scan_value_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
values = argToList(args.get('value'))
if len(values) == 0:
raise ValueError('Value(s) not specified')
if args.get('scan_type') == 'passiv':
scan_type_value = '0'
else:
scan_type_value = '1'
command_results: List[CommandResults] = []
for value in values:
try:
value_data = client.post_value_scan(value, scan_type_value, api_key)
value_data.update({'value': value})
command_results.append(CommandResults(
readable_output=tableToMarkdown('Value Details:', value_data),
outputs_prefix='Pulsedive.Scan',
outputs_key_field='value',
outputs=value_data
))
except DemistoException:
return_error(f'Failed to execute {demisto.command()} command. Error: Problem submitting the data for scanning')
return command_results
|
def scan_value_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
values = argToList(args.get('value'))
if len(values) == 0:
raise ValueError('Value(s) not specified')
if args.get('scan_type') == 'passiv':
scan_type_value = '0'
else:
scan_type_value = '1'
command_results: List[CommandResults] = []
for value in values:
try:
value_data = client.post_value_scan(value, scan_type_value, api_key)
value_data.update({'value': value})
command_results.append(CommandResults(
readable_output=tableToMarkdown('Value Details:', value_data),
outputs_prefix='Pulsedive.Scan',
outputs_key_field='value',
outputs=value_data
))
except DemistoException:
raise DemistoException(f'Failed to execute {demisto.command()} command. Error: Problem submitting the data for scanning')
return command_results
|
55,481 |
def test_dtype_empty():
modin_s = pd.Series()
pandas_s = pandas.Series()
assert modin_s.dtype == pandas_s.dtype
|
def test_dtype_empty():
modin_s, pandas_s = pd.Series(), pandas.Series()
assert modin_s.dtype == pandas_s.dtype
|
12,126 |
def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len,
p_dur, return_cf=False):
"""
Wrapper for P-picker routine by M. Baer, Schweizer Erdbebendienst.
:param reltrc: time series as numpy.ndarray float32 data, possibly filtered
:param samp_int: number of samples per second
:param tdownmax: if dtime exceeds tdownmax, the trigger is examined for
validity
:param tupevent: min nr of samples for itrm to be accepted as a pick
:param thr1: threshold to trigger for pick (c.f. paper)
:param thr2: threshold for updating sigma (c.f. paper)
:param preset_len: no of points taken for the estimation of variance of
SF(t) on preset()
:param p_dur: p_dur defines the time interval for which the maximum
amplitude is evaluated Originally set to 6 secs
:type return_cf: bool
:param return_cf: If ``True``, also return the characteristic function
calculated by the C-routine.
:return: (pptime, pfm [,cf]) pptime sample number of parrival;
pfm direction of first motion (U or D), optionally also the
numpy.ndarray float32 containing the values of the characteristic
function.
.. note:: currently the first sample is not taken into account
.. seealso:: [Baer1987]_
"""
pptime = C.c_int()
# c_chcar_p strings are immutable, use string_buffer for pointers
pfm = C.create_string_buffer(b" ", 5)
# be nice and adapt type if necessary
reltrc = np.ascontiguousarray(reltrc, np.float32)
# Initialize CF array (MB)
c_float_p = C.POINTER(C.c_float)
cf_arr = np.ascontiguousarray(np.zeros(len(reltrc) - 1), np.float32)
cf_p = cf_arr.ctypes.data_as(c_float_p)
# index in pk_mbaer.c starts with 1, 0 index is lost, length must be
# one shorter
args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,
tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p)
errcode = clibsignal.ppick(reltrc, *args)
if errcode != 0:
raise MemoryError("Error in function ppick of mk_mbaer.c")
# Switch cf_arr param (MB)
# add the sample to the time which is not taken into account
# pfm has to be decoded from byte to string
if return_cf:
return pptime.value + 1, pfm.value.decode('utf-8'), cf_arr
else:
return pptime.value + 1, pfm.value.decode('utf-8')
|
def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len,
p_dur, return_cf=False):
"""
Wrapper for P-picker routine by M. Baer, Schweizer Erdbebendienst.
:param reltrc: time series as numpy.ndarray float32 data, possibly filtered
:param samp_int: number of samples per second
:param tdownmax: if dtime exceeds tdownmax, the trigger is examined for
validity
:param tupevent: min nr of samples for itrm to be accepted as a pick
:param thr1: threshold to trigger for pick (c.f. paper)
:param thr2: threshold for updating sigma (c.f. paper)
:param preset_len: no of points taken for the estimation of variance of
SF(t) on preset()
:param p_dur: p_dur defines the time interval for which the maximum
amplitude is evaluated Originally set to 6 secs
:type return_cf: bool
:param return_cf: If ``True``, also return the characteristic function
calculated by the C-routine.
:return: (pptime, pfm [,cf]) pptime sample number of parrival;
pfm direction of first motion (U or D), optionally also the
numpy.ndarray float32 containing the values of the characteristic
function.
.. note:: currently the first sample is not taken into account
.. seealso:: [Baer1987]_
"""
pptime = C.c_int()
# c_chcar_p strings are immutable, use string_buffer for pointers
pfm = C.create_string_buffer(b" ", 5)
# be nice and adapt type if necessary
reltrc = np.ascontiguousarray(reltrc, np.float32)
# Initialize CF array (MB)
c_float_p = C.POINTER(C.c_float)
cf_arr = np.zeros(len(reltrc) - 1, dtype=np.float32, order="C")
cf_p = cf_arr.ctypes.data_as(c_float_p)
# index in pk_mbaer.c starts with 1, 0 index is lost, length must be
# one shorter
args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,
tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p)
errcode = clibsignal.ppick(reltrc, *args)
if errcode != 0:
raise MemoryError("Error in function ppick of mk_mbaer.c")
# Switch cf_arr param (MB)
# add the sample to the time which is not taken into account
# pfm has to be decoded from byte to string
if return_cf:
return pptime.value + 1, pfm.value.decode('utf-8'), cf_arr
else:
return pptime.value + 1, pfm.value.decode('utf-8')
|
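A usage sketch for the wrapper above, assuming ObsPy is installed. It uses ObsPy's bundled example stream, and the picker thresholds are the illustrative values from ObsPy's trigger tutorial rather than anything prescribed here:
from obspy import read
from obspy.signal.trigger import pk_baer

tr = read()[0]                        # ObsPy's built-in example trace
df = tr.stats.sampling_rate

p_pick, phase_info = pk_baer(reltrc=tr.data, samp_int=df,
                             tdownmax=20, tupevent=60,
                             thr1=7.0, thr2=12.0,
                             preset_len=100, p_dur=100)
print(p_pick / df, phase_info)        # pick time in seconds from trace start, first-motion flag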
31,448 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
trending_packs = get_trending_packs(bq_client, index_folder_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified,
landing_page_sections=landing_page_sections,
trending_packs=trending_packs)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
logging.info("Calling prepare release notes")
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=landing_page_sections)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
trending_packs = get_trending_packs(bq_client, index_folder_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified,
landing_page_sections=landing_page_sections,
trending_packs=trending_packs)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
logging.info("Calling prepare release notes")
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=landing_page_sections)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
7,398 |
def pcc(imgA, imgB, roi=None):
"""Calculate Pearson's Correlation Coefficient between pixel intensities in
channels.
Parameters
----------
imgA : (M, N) ndarray
Image of channel A.
imgB : (M, N) ndarray
Image of channel B to be correlated with channel A.
Must have same dimensions as `imgA`.
roi : (M, N) ndarray of dtype bool, optional
Only `imgA` and `imgB` pixels within this region of interest mask are
included in the calculation.
Must have same dimensions as `imgA`.
Returns
-------
pcc : float
Pearson's correlation coefficient of the pixel intensities between the
two images, within the ROI if provided.
p-value : float
Two-tailed p-value.
Notes
-------
Pearson's Correlation Coefficient (PCC) measures the linear correlation
between the pixel intensities of the two images. Its value ranges from -1
for perfect linear anti-correlation and +1 for perfect linear correlation.
The calculation of the p-value assumes that the intensities of pixels in
each input image are normally distributed.
Scipy's implementation of Pearson's correlation coefficient is used. Please
refer to it for further information and caveats [1]_.
.. math::
r = \frac{\sum (A_i - m_A_i) (B_i - m_B_i)}
{\sqrt{\sum (A_i - m_A_i)^2 \sum (B_i - m_B_i)^2}}
where
:math:`A_i` is the value of the :math:`i^{th}` pixel in `imgA`
:math:`B_i` is the value of the :math:`i^{th}` pixel in `imgB`,
:math:`m_A` is the mean of the pixel values in `imgA`
:math:`m_B` is the mean of the pixel values in `imgB`
A low PCC value does not necessarily mean that there is no correlation
between the two channel intensities, just that there is no linear
correlation. You may wish to plot the pixel intensities of each of the two
channels in a 2D scatterplot and use Spearman's rank correlation if a
non-linear correlation is visually identified [2]_. Also consider if you
are interested in correlation or co-occurrence, in which case a method
involving segmentation masks (e.g. MCC or intersection coefficient) may be
more suitable [3]_ [4]_.
Providing the ROI of only relevant sections of the image (e.g. cells, or
particular cellular compartments) and removing noise is important as the
PCC is sensitive to these measures [3]_ [4]_.
References
-------
.. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
.. [2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html
.. [3] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
guide to evaluating colocalization in biological microscopy.
American journal of physiology. Cell physiology, 300(4), C723–C742.
https://doi.org/10.1152/ajpcell.00462.2010
.. [4] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
subcellular colocalization analysis in light microscopy. Journal of
Microscopy, 224: 213-232.
https://doi.org/10.1111/j.1365-2818.2006.01706.x
"""
if roi is None:
roi = np.ones_like(imgA)
check_numpy_arr(imgA, 'imgA', bool_expected=False)
check_numpy_arr(imgB, 'imgB', bool_expected=False)
check_numpy_arr(roi, 'roi', bool_expected=True)
check_shape_equality_all(imgA, imgB, roi)
imgA_masked = imgA[roi.astype(bool)]
imgB_masked = imgB[roi.astype(bool)]
return pearsonr(imgA_masked, imgB_masked)
|
def pcc(image0, image1, roi=None):
"""Calculate Pearson's Correlation Coefficient between pixel intensities in
channels.
Parameters
----------
image0 : (M, N) ndarray
Image of channel A.
image1 : (M, N) ndarray
Image of channel B to be correlated with channel A.
Must have same dimensions as `image0`.
roi : (M, N) ndarray of dtype bool, optional
Only `image0` and `image1` pixels within this region of interest mask are
included in the calculation.
Must have same dimensions as `image0`.
Returns
-------
pcc : float
Pearson's correlation coefficient of the pixel intensities between the
two images, within the ROI if provided.
p-value : float
Two-tailed p-value.
Notes
-------
Pearson's Correlation Coefficient (PCC) measures the linear correlation
between the pixel intensities of the two images. Its value ranges from -1
for perfect linear anti-correlation and +1 for perfect linear correlation.
The calculation of the p-value assumes that the intensities of pixels in
each input image are normally distributed.
Scipy's implementation of Pearson's correlation coefficient is used. Please
refer to it for further information and caveats [1]_.
.. math::
r = \frac{\sum (A_i - m_A) (B_i - m_B)}
{\sqrt{\sum (A_i - m_A)^2 \sum (B_i - m_B)^2}}
where
:math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`
:math:`B_i` is the value of the :math:`i^{th}` pixel in `image1`,
:math:`m_A` is the mean of the pixel values in `image0`
:math:`m_B` is the mean of the pixel values in `image1`
A low PCC value does not necessarily mean that there is no correlation
between the two channel intensities, just that there is no linear
correlation. You may wish to plot the pixel intensities of each of the two
channels in a 2D scatterplot and use Spearman's rank correlation if a
non-linear correlation is visually identified [2]_. Also consider if you
are interested in correlation or co-occurrence, in which case a method
involving segmentation masks (e.g. MCC or intersection coefficient) may be
more suitable [3]_ [4]_.
Providing the ROI of only relevant sections of the image (e.g. cells, or
particular cellular compartments) and removing noise is important as the
PCC is sensitive to these measures [3]_ [4]_.
References
-------
.. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
.. [2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html
.. [3] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
guide to evaluating colocalization in biological microscopy.
American journal of physiology. Cell physiology, 300(4), C723–C742.
https://doi.org/10.1152/ajpcell.00462.2010
.. [4] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
subcellular colocalization analysis in light microscopy. Journal of
Microscopy, 224: 213-232.
https://doi.org/10.1111/j.1365-2818.2006.01706.x
"""
if roi is None:
roi = np.ones_like(image0)
check_numpy_arr(image0, 'image0', bool_expected=False)
check_numpy_arr(image1, 'image1', bool_expected=False)
check_numpy_arr(roi, 'roi', bool_expected=True)
check_shape_equality_all(image0, image1, roi)
image0_masked = image0[roi.astype(bool)]
image1_masked = image1[roi.astype(bool)]
return pearsonr(image0_masked, image1_masked)
|
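As a quick, self-contained illustration of the masked Pearson computation performed above (using scipy.stats.pearsonr directly; the synthetic images and ROI are made up for the example):

import numpy as np
from scipy.stats import pearsonr

rng = np.random.default_rng(0)
img_a = rng.random((64, 64))
img_b = 0.8 * img_a + 0.2 * rng.random((64, 64))  # partially correlated channel
roi = np.zeros((64, 64), dtype=bool)
roi[16:48, 16:48] = True  # restrict the comparison to a region of interest

r, p = pearsonr(img_a[roi], img_b[roi])
print(f"PCC within ROI: r={r:.3f}, p={p:.3g}")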
19,501 |
def view_copy(src, dst, view, spec=None):
"""
Copy a file from src to dst.
Use spec and view to generate relocations
"""
is_link = os.path.islink(src)
# Just copy the file when not a link, whether relocating it or not.
if not is_link:
shutil.copy2(src, dst)
# If we're not relocating (no spec), we can
# early exit; don't forget to create a symlink if necessary.
# Note: replace with copy2(src, dst, follow_symlinks=False) when
# dropping Python 2.7.
if not spec or spec.external:
if is_link:
os.symlink(os.readlink(src), dst)
return
# Now collect some paths for relocation purposes.
prefix_to_projection = collections.OrderedDict(
{spec.prefix: view.get_projection_for_spec(spec)}
)
for dep in spec.traverse():
if not dep.external:
prefix_to_projection[dep.prefix] = view.get_projection_for_spec(dep)
# Do relocation on the link
if is_link:
# Note: we're doing literal text replacement, so using normpath is not
# an option (and also, normpath should be avoided since it's incorrect
# for paths that contain a symlinked dir followed by ..)
link_dst = os.readlink(src)
# The majority of the cases is relative links, e.g. libz.so -> libz.so.1.2.3
# and we copy these verbatim, without checking if they leave the prefix
# through multiple `..`'s -- this should be improved.
# Absolute links are relocated by literal string replacement -- this
# isn't perfect either...
if os.path.isabs(link_dst):
for (prefix, projection) in prefix_to_projection.items():
if link_dst.startswith(prefix):
link_dst = link_dst.replace(prefix, projection, 1)
break
os.symlink(link_dst, dst)
return
# This is vestigial code for the *old* location of sbang. Previously,
# sbang was a bash script, and it lived in the spack prefix. It is
# now a POSIX script that lives in the install prefix. Old packages
# will have the old sbang location in their shebangs.
# TODO: Not sure which one to use...
import spack.hooks.sbang as sbang
# Break a package include cycle
import spack.relocate
orig_sbang = "#!/bin/bash {0}/bin/sbang".format(spack.paths.spack_root)
new_sbang = sbang.sbang_shebang_line()
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin(binaries=[dst], prefixes=prefix_to_projection)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang
spack.relocate.relocate_text(files=[dst], prefixes=prefix_to_projection)
try:
s = os.stat(src)
os.chown(dst, s.st_uid, s.st_gid)
except OSError:
tty.debug("Can't change the permissions for %s" % dst)
|
def view_copy(src, dst, view, spec=None):
"""
Copy a file from src to dst.
Use spec and view to generate relocations
"""
is_link = os.path.islink(src)
# Just copy the file when not a link, whether relocating it or not.
if not is_link:
shutil.copy2(src, dst)
# If we're not relocating (no spec or external spec), we can
# early exit; don't forget to create a symlink if necessary.
# Note: replace with copy2(src, dst, follow_symlinks=False) when
# dropping Python 2.7.
if not spec or spec.external:
if is_link:
os.symlink(os.readlink(src), dst)
return
# Now collect some paths for relocation purposes.
prefix_to_projection = collections.OrderedDict(
{spec.prefix: view.get_projection_for_spec(spec)}
)
for dep in spec.traverse():
if not dep.external:
prefix_to_projection[dep.prefix] = view.get_projection_for_spec(dep)
# Do relocation on the link
if is_link:
# Note: we're doing literal text replacement, so using normpath is not
# an option (and also, normpath should be avoided since it's incorrect
# for paths that contain a symlinked dir followed by ..)
link_dst = os.readlink(src)
# The majority of the cases is relative links, e.g. libz.so -> libz.so.1.2.3
# and we copy these verbatim, without checking if they leave the prefix
# through multiple `..`'s -- this should be improved.
# Absolute links are relocated by literal string replacement -- this
# isn't perfect either...
if os.path.isabs(link_dst):
for (prefix, projection) in prefix_to_projection.items():
if link_dst.startswith(prefix):
link_dst = link_dst.replace(prefix, projection, 1)
break
os.symlink(link_dst, dst)
return
# This is vestigial code for the *old* location of sbang. Previously,
# sbang was a bash script, and it lived in the spack prefix. It is
# now a POSIX script that lives in the install prefix. Old packages
# will have the old sbang location in their shebangs.
# TODO: Not sure which one to use...
import spack.hooks.sbang as sbang
# Break a package include cycle
import spack.relocate
orig_sbang = "#!/bin/bash {0}/bin/sbang".format(spack.paths.spack_root)
new_sbang = sbang.sbang_shebang_line()
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin(binaries=[dst], prefixes=prefix_to_projection)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang
spack.relocate.relocate_text(files=[dst], prefixes=prefix_to_projection)
try:
s = os.stat(src)
os.chown(dst, s.st_uid, s.st_gid)
except OSError:
tty.debug("Can't change the permissions for %s" % dst)
|
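The absolute-symlink branch above relocates paths by literal prefix replacement; a tiny standalone illustration of that replacement (with made-up prefixes and projections):

import collections

# Hypothetical install prefixes mapped to their view projections.
prefix_to_projection = collections.OrderedDict({
    "/spack/opt/zlib-1.2.13": "/view",
    "/spack/opt/openssl-3.0.8": "/view",
})

link_dst = "/spack/opt/zlib-1.2.13/lib/libz.so.1.2.13"
for prefix, projection in prefix_to_projection.items():
    if link_dst.startswith(prefix):
        link_dst = link_dst.replace(prefix, projection, 1)
        break
print(link_dst)  # /view/lib/libz.so.1.2.13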
42,410 |
def test_cmax_equals_cmin(byte_arr):
"""Fail gracefully when the cmax is smaller than the cmin."""
with pytest.raises(
ValueError,
match="`cmax` and `cmin` should not be the same value. Please specify `cmax` > `cmin`",
):
es.bytescale(byte_arr, cmin=100, cmax=100)
|
def test_cmax_equals_cmin(byte_arr):
"""Fail gracefully when the cmax is smaller than the cmin."""
with pytest.raises(
ValueError,
match="`cmax` and `cmin` should not be the same value",
):
es.bytescale(byte_arr, cmin=100, cmax=100)
|
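The test above only pins the start of the error message; a rough sketch of the kind of guard it exercises is shown below (the bytescale signature and scaling details here are assumptions for illustration, not the library's exact code):

import numpy as np

def bytescale(data, cmin=None, cmax=None, high=255, low=0):
    """Scale an array to uint8 in [low, high]; sketch of the cmin/cmax guard."""
    cmin = data.min() if cmin is None else cmin
    cmax = data.max() if cmax is None else cmax
    if cmax == cmin:
        raise ValueError("`cmax` and `cmin` should not be the same value. "
                         "Please specify `cmax` > `cmin`")
    scale = (high - low) / (cmax - cmin)
    return ((np.clip(data, cmin, cmax) - cmin) * scale + low).astype(np.uint8)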
59,432 |
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given meas_level, rep_time, rep_delay is not allowed
for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', None):
raise SchemaValidationError(
('rep_time = {} not supported for backend {}, only {} is supported'
).format(rep_time, backend_config.backend_name, backend_config.rep_times)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
|
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given meas_level, rep_time, rep_delay is not allowed
for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', None):
raise SchemaValidationError(
'rep_time = {} not supported for backend {}, '
'only {} is supported'.format(
rep_time,
backend_config.backend_name,
backend_config.rep_times,
)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
|
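The closing dict comprehension above simply drops unset (None) entries so they do not override the defaults applied inside RunConfig; a small standalone illustration of the idiom (RunConfig here is a minimal stand-in, not Qiskit's class):

class RunConfig:
    """Minimal stand-in that stores whatever keyword arguments it receives."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

run_config_dict = dict(meas_level=2, rep_time=1000, rep_delay=None, meas_map=None)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
print(vars(run_config))  # {'meas_level': 2, 'rep_time': 1000}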
7,535 |
def _initialize_astropy():
from . import config
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built '
'(note that if you use the latter you may need '
'to install build dependencies manually)')
else:
# Outright broken installation, just raise standard error
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
|
def _initialize_astropy():
from . import config
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built '
'(note that if you use the latter you need '
'to install build dependencies manually)')
else:
# Outright broken installation, just raise standard error
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
|
23,971 |
def initConfigPath(configPath=None):
"""
Creates the current configuration path if it doesn't exist. Also makes sure that various sub directories also exist.
@param configPath: an optional path which should be used instead (only useful when being called from outside of NVDA)
@type configPath: str
"""
if not configPath:
configPath=globalVars.appArgs.configPath
if not os.path.isdir(configPath):
os.makedirs(configPath)
subdirs=["speechDicts","profiles"]
if not isAppX:
subdirs.append("addons")
for subdir in subdirs:
subdir=os.path.join(configPath,subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
|
def openUserConfigDirectory():
"""
Creates the current configuration path if it doesn't exist. Also makes sure that various sub directories also exist.
@param configPath: an optional path which should be used instead (only useful when being called from outside of NVDA)
@type configPath: str
"""
if not configPath:
configPath=globalVars.appArgs.configPath
if not os.path.isdir(configPath):
os.makedirs(configPath)
subdirs=["speechDicts","profiles"]
if not isAppX:
subdirs.append("addons")
for subdir in subdirs:
subdir=os.path.join(configPath,subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
|
6,424 |
def execute():
# add holiday list and employee group fields in SLA
# change response and resolution time in priorities child table
if frappe.db.exists('DocType', 'Service Level Agreement'):
sla_details = frappe.db.get_all('Service Level Agreement', fields=['name', 'service_level'])
priorities = frappe.db.get_all('Service Level Priority', fields=['*'], filters={
'parenttype': ('in', ['Service Level Agreement', 'Service Level'])
})
frappe.reload_doc('support', 'doctype', 'service_level_agreement')
frappe.reload_doc('support', 'doctype', 'service_level_priority')
for entry in sla_details:
values = frappe.db.get_value('Service Level', entry.service_level, ['holiday_list', 'employee_group'])
if values:
holiday_list = values[0]
employee_group = values[1]
frappe.db.set_value('Service Level Agreement', entry.name, {
'holiday_list': holiday_list,
'employee_group': employee_group
})
priority_dict = {}
for priority in priorities:
if priority.parenttype == 'Service Level Agreement':
response_time = convert_to_seconds(priority.response_time, priority.response_time_period)
resolution_time = convert_to_seconds(priority.resolution_time, priority.resolution_time_period)
frappe.db.set_value('Service Level Priority', priority.name, {
'response_time': response_time,
'resolution_time': resolution_time
})
if priority.parenttype == 'Service Level':
if not priority.parent in priority_dict:
priority_dict[priority.parent] = []
priority_dict[priority.parent].append(priority)
# copy Service Levels to Service Level Agreements
sl = [entry.service_level for entry in sla_details]
service_levels = frappe.db.get_all('Service Level', filters={'service_level': ('not in', sl)}, fields=['*'])
for entry in service_levels:
sla = frappe.new_doc('Service Level Agreement')
sla.service_level = entry.service_level
sla.holiday_list = entry.holiday_list
sla.employee_group = entry.employee_group
sla.flags.ignore_validate = True
sla = sla.insert(ignore_mandatory=True)
frappe.db.sql("""
UPDATE
`tabService Day`
SET
parent = %(new_parent)s , parentfield = 'support_and_resolution', parenttype = 'Service Level Agreement'
WHERE
parent = %(old_parent)s
""", {'new_parent': sla.name, 'old_parent': entry.name}, as_dict = 1)
priority_list = priority_dict.get(entry.name)
if priority_list:
sla = frappe.get_doc('Service Level Agreement', sla.name)
for priority in priority_list:
row = sla.append('priorities', {
'priority': priority.priority,
'default_priority': priority.default_priority,
'response_time': convert_to_seconds(priority.response_time, priority.response_time_period),
'resolution_time': convert_to_seconds(priority.resolution_time, priority.resolution_time_period)
})
row.db_update()
sla.db_update()
# set issue status as Replied since Hold status is removed
if frappe.db.exists('DocType', 'Issue'):
issues_on_hold = frappe.db.sql("""
SELECT
name
FROM
`tabIssue`
WHERE
status = 'Hold'
""", as_dict=1)
issues = [entry.name for entry in issues_on_hold]
frappe.reload_doc('support', 'doctype', 'issue')
frappe.db.sql("""
UPDATE
`tabIssue`
SET
status='Replied'
WHERE
name in %(issues)s
""", {'issues': issues}, debug=1)
|
def execute():
# add holiday list and employee group fields in SLA
# change response and resolution time in priorities child table
if frappe.db.exists('DocType', 'Service Level Agreement'):
sla_details = frappe.db.get_all('Service Level Agreement', fields=['name', 'service_level'])
priorities = frappe.db.get_all('Service Level Priority', fields=['*'], filters={
'parenttype': ('in', ['Service Level Agreement', 'Service Level'])
})
frappe.reload_doc('support', 'doctype', 'service_level_agreement')
frappe.reload_doc('support', 'doctype', 'service_level_priority')
for entry in sla_details:
values = frappe.db.get_value('Service Level', entry.service_level, ['holiday_list', 'employee_group'])
if values:
holiday_list = values[0]
employee_group = values[1]
frappe.db.set_value('Service Level Agreement', entry.name, {
'holiday_list': holiday_list,
'employee_group': employee_group
})
priority_dict = {}
for priority in priorities:
if priority.parenttype == 'Service Level Agreement':
response_time = convert_to_seconds(priority.response_time, priority.response_time_period)
resolution_time = convert_to_seconds(priority.resolution_time, priority.resolution_time_period)
frappe.db.set_value('Service Level Priority', priority.name, {
'response_time': response_time,
'resolution_time': resolution_time
})
if priority.parenttype == 'Service Level':
if not priority.parent in priority_dict:
priority_dict[priority.parent] = []
priority_dict[priority.parent].append(priority)
# copy Service Levels to Service Level Agreements
sl = [entry.service_level for entry in sla_details]
service_levels = frappe.db.get_all('Service Level', filters={'service_level': ('not in', sl)}, fields=['*'])
for entry in service_levels:
sla = frappe.new_doc('Service Level Agreement')
sla.service_level = entry.service_level
sla.holiday_list = entry.holiday_list
sla.employee_group = entry.employee_group
sla.flags.ignore_validate = True
sla = sla.insert(ignore_mandatory=True)
frappe.db.sql("""
UPDATE
`tabService Day`
SET
parent = %(new_parent)s , parentfield = 'support_and_resolution', parenttype = 'Service Level Agreement'
WHERE
parent = %(old_parent)s
""", {'new_parent': sla.name, 'old_parent': entry.name}, as_dict = 1)
priority_list = priority_dict.get(entry.name)
if priority_list:
sla = frappe.get_doc('Service Level Agreement', sla.name)
for priority in priority_list:
row = sla.append('priorities', {
'priority': priority.priority,
'default_priority': priority.default_priority,
'response_time': convert_to_seconds(priority.response_time, priority.response_time_period),
'resolution_time': convert_to_seconds(priority.resolution_time, priority.resolution_time_period)
})
row.db_update()
sla.db_update()
# set issue status as Replied since Hold status is removed
if frappe.db.exists('DocType', 'Issue'):
issues_on_hold = frappe.db.sql("""
SELECT
name
FROM
`tabIssue`
WHERE
status = 'Hold'
""", as_dict=1)
issues = [entry.name for entry in issues_on_hold]
frappe.reload_doc('support', 'doctype', 'issue')
frappe.db.sql("""
UPDATE
`tabIssue`
SET
status='Replied'
WHERE
name in %(issues)s
""", {'issues': issues})
|
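Both versions of the patch call a convert_to_seconds helper that is not shown in this snippet; a plausible sketch, assuming the period is one of a handful of unit strings, could look like this (the unit labels are guesses for illustration):

def convert_to_seconds(value, period):
    # Hypothetical helper; the real implementation and unit labels may differ.
    seconds_per_unit = {
        'Second(s)': 1,
        'Minute(s)': 60,
        'Hour(s)': 3600,
        'Day(s)': 86400,
        'Week(s)': 604800,
    }
    return (value or 0) * seconds_per_unit.get(period, 1)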
36,891 |
def add_parser(subparsers, parent_parser):
GET_HELP = (
"Download file or directory from any DVC project or Git repository."
)
get_parser = subparsers.add_parser(
"get",
parents=[parent_parser],
description=append_doc_link(GET_HELP, "get"),
help=GET_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
"url",
help="Location of DVC project or Git repository to download from",
)
get_parser.add_argument(
"path",
help="Path to a file or directory within the project or repository",
)
get_parser.add_argument(
"-o", "--out", nargs="?", help="Destination path to download files to"
)
get_parser.add_argument(
"--rev", nargs="?", help="Git revision (e.g. branch, tag, SHA)"
)
get_parser.add_argument(
"--show-url",
action="store_true",
help="Just print the storage location (URL) the target data would be "
"downloaded from.",
)
get_parser.set_defaults(func=CmdGet)
|
def add_parser(subparsers, parent_parser):
GET_HELP = (
"Download file or directory from any DVC project or Git repository."
)
get_parser = subparsers.add_parser(
"get",
parents=[parent_parser],
description=append_doc_link(GET_HELP, "get"),
help=GET_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
"url",
help="Location of DVC project or Git repository to download from",
)
get_parser.add_argument(
"path",
help="Path to a file or directory within the project or repository",
)
get_parser.add_argument(
"-o", "--out", nargs="?", help="Destination path to download files to"
)
get_parser.add_argument(
"--rev", nargs="?", help="Git revision (e.g. branch, tag, SHA)"
)
get_parser.add_argument(
"--show-url",
action="store_true",
help="Print the storage location (URL) the target data would be "
"downloaded from.",
)
get_parser.set_defaults(func=CmdGet)
|
17,448 |
def _infer_interval_breaks(coord, axis=0, scale=None, check_monotonic=False):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
>>> _infer_interval_breaks(np.logspace(-2, 2, 5), logscale=True)
array([3.16227766e-03, 3.16227766e-02, 3.16227766e-01, 3.16227766e+00,
3.16227766e+01, 3.16227766e+02])
"""
coord = np.asarray(coord)
if check_monotonic and not _is_monotonic(coord, axis=axis):
raise ValueError(
"The input coordinate is not sorted in increasing "
"order along axis %d. This can lead to unexpected "
"results. Consider calling the `sortby` method on "
"the input DataArray. To plot data with categorical "
"axes, consider using the `heatmap` function from "
"the `seaborn` statistical plotting library." % axis
)
# If logscale, compute the intervals in the logarithmic space
if scale == "log":
coord = np.log10(coord)
deltas = 0.5 * np.diff(coord, axis=axis)
if deltas.size == 0:
deltas = np.array(0.0)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(
slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)
)
interval_breaks = np.concatenate(
[first, coord[trim_last] + deltas, last], axis=axis
)
if scale == "log":
# Convert the intervals back into the linear space
return np.power(10, interval_breaks)
return interval_breaks
|
def _infer_interval_breaks(coord, axis=0, scale=None, check_monotonic=False):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
>>> _infer_interval_breaks(np.logspace(-2, 2, 5), scale="log")
array([3.16227766e-03, 3.16227766e-02, 3.16227766e-01, 3.16227766e+00,
3.16227766e+01, 3.16227766e+02])
"""
coord = np.asarray(coord)
if check_monotonic and not _is_monotonic(coord, axis=axis):
raise ValueError(
"The input coordinate is not sorted in increasing "
"order along axis %d. This can lead to unexpected "
"results. Consider calling the `sortby` method on "
"the input DataArray. To plot data with categorical "
"axes, consider using the `heatmap` function from "
"the `seaborn` statistical plotting library." % axis
)
# If logscale, compute the intervals in the logarithmic space
if scale == "log":
coord = np.log10(coord)
deltas = 0.5 * np.diff(coord, axis=axis)
if deltas.size == 0:
deltas = np.array(0.0)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(
slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)
)
interval_breaks = np.concatenate(
[first, coord[trim_last] + deltas, last], axis=axis
)
if scale == "log":
# Convert the intervals back into the linear space
return np.power(10, interval_breaks)
return interval_breaks
|
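A compact, standalone illustration of the midpoint logic for the one-dimensional case (independent of the helper functions referenced above):

import numpy as np

def interval_breaks_1d(coord, log=False):
    """Midpoints between coordinates, extrapolated half a step at both ends."""
    coord = np.log10(coord) if log else np.asarray(coord, dtype=float)
    deltas = 0.5 * np.diff(coord)
    breaks = np.concatenate([[coord[0] - deltas[0]],
                             coord[:-1] + deltas,
                             [coord[-1] + deltas[-1]]])
    return np.power(10, breaks) if log else breaks

print(interval_breaks_1d(np.arange(5)))           # [-0.5  0.5  1.5  2.5  3.5  4.5]
print(interval_breaks_1d(np.logspace(-2, 2, 5), log=True))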
29,015 |
def _receive_socket_data(host: str, sock: socket.socket) -> Optional[bytes]:
server_address = (host, SQL_BROWSER_DEFAULT_PORT)
# The message is a CLNT_UCAST_EX packet to get all instances
# https://msdn.microsoft.com/en-us/library/cc219745.aspx
message = "\x03"
# Encode the message as a bytes array
message = message.encode()
# send data and receive response
try:
logger.info(f"Sending message to requested host: {host}, {message}")
sock.sendto(message, server_address)
data, _ = sock.recvfrom(_BUFFER_SIZE)
return data
except socket.timeout as err:
logger.debug(
f"Socket timeout reached, maybe browser service on host: {host} doesnt " "exist"
)
sock.close()
raise err
except socket.error as err:
if err.errno == errno.ECONNRESET:
error_message = (
f"Connection was forcibly closed by the remote host. The host: {host} is "
"rejecting the packet."
)
else:
error_message = (
"An unknown socket error occurred while trying the mssql fingerprint, "
"closing socket."
)
sock.close()
raise err(error_message)
|
def _query_mssql_for_instance_data(host: str, sock: socket.socket) -> Optional[bytes]:
server_address = (host, SQL_BROWSER_DEFAULT_PORT)
# The message is a CLNT_UCAST_EX packet to get all instances
# https://msdn.microsoft.com/en-us/library/cc219745.aspx
message = "\x03"
# Encode the message as a bytes array
message = message.encode()
# send data and receive response
try:
logger.info(f"Sending message to requested host: {host}, {message}")
sock.sendto(message, server_address)
data, _ = sock.recvfrom(_BUFFER_SIZE)
return data
except socket.timeout as err:
logger.debug(
f"Socket timeout reached, maybe browser service on host: {host} doesnt " "exist"
)
sock.close()
raise err
except socket.error as err:
if err.errno == errno.ECONNRESET:
error_message = (
f"Connection was forcibly closed by the remote host. The host: {host} is "
"rejecting the packet."
)
else:
error_message = (
"An unknown socket error occurred while trying the mssql fingerprint, "
"closing socket."
)
sock.close()
raise err(error_message)
|
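For context, a hedged sketch of how such a fingerprint helper might be driven end to end; the UDP socket setup, timeout and constants below are assumptions for illustration rather than the project's actual wiring:

import socket

SQL_BROWSER_DEFAULT_PORT = 1434  # assumed value of the module-level constant
_BUFFER_SIZE = 4096              # assumed value of the module-level constant

def query_instances(host: str, timeout: float = 3.0) -> bytes:
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        # A CLNT_UCAST_EX request is a single 0x03 byte asking for all instances.
        sock.sendto(b"\x03", (host, SQL_BROWSER_DEFAULT_PORT))
        data, _ = sock.recvfrom(_BUFFER_SIZE)
        return data
    finally:
        sock.close()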
43,623 |
def grad(func, argnum=None):
"""Returns the gradient as a callable function of (functions of) QNodes.
This is a wrapper around the :mod:`autograd.grad` functions.
Function arguments with the property ``requires_grad`` set to ``False``
will automatically be excluded from the gradient computation, unless
the ``argnum`` keyword argument is passed.
Args:
func (function): a Python function or QNode that contains
a combination of quantum and classical nodes
Keyword Args:
argnum (int or list(int)): Which argument(s) to take the gradient
with respect to. By default, the arguments themselves are used
to determine differentiability, by examining the ``requires_grad``
property. Providing this keyword argument overrides this behaviour,
allowing differentiability to be set manually.
Returns:
function: the function that returns the gradient of the input
function with respect to the arguments in argnum
"""
# pylint: disable=no-value-for-parameter
if argnum is not None:
# for backwards compatibility with existing code
# that manually specifies argnum
return _grad(func, argnum)
def _gradient_function(*args, **kwargs):
"""Inspect the arguments for differentiability, and
compute the autograd gradient function with required argnums
dynamically"""
argnum = []
for idx, arg in enumerate(args):
if not getattr(arg, "requires_grad", True):
continue
argnum.append(idx)
return _grad(func, argnum)(*args, **kwargs)
return _gradient_function
|
def grad(func, argnum=None):
"""Returns the gradient as a callable function of (functions of) QNodes.
This is a wrapper around the :mod:`autograd.grad` function.
Function arguments with the property ``requires_grad`` set to ``False``
will automatically be excluded from the gradient computation, unless
the ``argnum`` keyword argument is passed.
Args:
func (function): a Python function or QNode that contains
a combination of quantum and classical nodes
Keyword Args:
argnum (int or list(int)): Which argument(s) to take the gradient
with respect to. By default, the arguments themselves are used
to determine differentiability, by examining the ``requires_grad``
property. Providing this keyword argument overrides this behaviour,
allowing differentiability to be set manually.
Returns:
function: the function that returns the gradient of the input
function with respect to the arguments in argnum
"""
# pylint: disable=no-value-for-parameter
if argnum is not None:
# for backwards compatibility with existing code
# that manually specifies argnum
return _grad(func, argnum)
def _gradient_function(*args, **kwargs):
"""Inspect the arguments for differentiability, and
compute the autograd gradient function with required argnums
dynamically"""
argnum = []
for idx, arg in enumerate(args):
if not getattr(arg, "requires_grad", True):
continue
argnum.append(idx)
return _grad(func, argnum)(*args, **kwargs)
return _gradient_function
|
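A small self-contained illustration of the argnum-selection idea using autograd's grad directly. In PennyLane the requires_grad flag lives on its own tensor class; plain NumPy arrays do not carry one, so this sketch records trainability in a separate dict instead:

import autograd.numpy as anp
from autograd import grad as _grad

def cost(x, y):
    return anp.sum(x ** 2) + anp.sum(y ** 3)

x = anp.array([1.0, 2.0])
y = anp.array([3.0])

trainable = {0: True, 1: False}  # stand-in for per-argument requires_grad
argnum = [i for i, flag in trainable.items() if flag]

dx = _grad(cost, argnum)(x, y)  # differentiate with respect to x only
print(dx)                       # (array([2., 4.]),)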
7,433 |
def stain_color_matrix(colors):
"""Creates a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted form the Java code written by G.Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which I do not recognize as a stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
def stain_color_matrix(colors):
"""Creates a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted from the Java code written by G.Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which is not a known stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
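A short usage example for the function above (it assumes stain_color_matrix as defined here and NumPy imported as np, matching the function body). Named stains are looked up from the built-in table, while custom RGB triplets are normalized to unit length:

# Two known stains plus one measured RGB triplet (floats required for custom colors).
M = stain_color_matrix(["Hematoxylin", "DAB", (0.2, 0.5, 0.8)])
print(M.shape)                    # (3, 3)
print(np.linalg.norm(M, axis=1))  # each row is (approximately) unit length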
47,504 |
def is_unusual(example, min=4):
"""Check if file is unusual, uses symbol '=' less than min times"""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=")
if counter > min:
return {"unusual": False}
return {"unusual": True}
|
def is_unusual(example, min=4):
"""Check if file uses symbol '=' less than `min` times."""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=")
if counter > min:
return {"unusual": False}
return {"unusual": True}
|
27,914 |
def clip(a, a_min, a_max):
"""Clips the values of an array to a given interval.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Args:
a (~chainerx.ndarray): Array containing elements to clip.
a_min (scalar): Minimum value.
a_max (scalar): Maximum value.
Returns:
~chainerx.ndarray: An array with the elements of ``a``, but where
values < ``a_min`` are replaced with ``a_min``,
and those > ``a_max`` with ``a_max``.
Note:
The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are
not supported yet.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``a``.
.. seealso:: :func:`numpy.clip`
"""
if a_min is None:
a_min = a.min()
if a_max is None:
a_max = a.max()
return -chainerx.maximum(-chainerx.maximum(a, a_min), -a_max)
|
def clip(a, a_min, a_max):
"""Clips the values of an array to a given interval.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Args:
a (~chainerx.ndarray): Array containing elements to clip.
a_min (scalar): Minimum value.
a_max (scalar): Maximum value.
Returns:
~chainerx.ndarray: An array with the elements of ``a``, but where
values < ``a_min`` are replaced with ``a_min``,
and those > ``a_max`` with ``a_max``.
Note:
The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are
not supported yet.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``a``.
.. seealso:: :func:`numpy.clip`
"""
if a_min is None:
return chainerx.minimum(a, a_max)
if a_max is None:
a_max = a.max()
return -chainerx.maximum(-chainerx.maximum(a, a_min), -a_max)
|
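The return expression uses the identity clip(a, a_min, a_max) == minimum(maximum(a, a_min), a_max), written with maximum only via negation; a quick NumPy check of the same identity (NumPy stands in for chainerx here):

import numpy as np

a = np.array([-3.0, -1.0, 0.5, 2.0, 7.0])
a_min, a_max = -1.0, 2.0

via_maximum_only = -np.maximum(-np.maximum(a, a_min), -a_max)
print(via_maximum_only)                                            # [-1.  -1.   0.5  2.   2. ]
print(np.array_equal(via_maximum_only, np.clip(a, a_min, a_max)))  # True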
56,393 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"/home/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"created as.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=['html', 'json', 'codeclimate'],
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given folder.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"/home/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"}.\n"
"You can use any environment variable "
"inside this file and it will be "
"expaneded.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=['html', 'json', 'codeclimate'],
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given folder.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|