id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
10,112 |
def deprecation_warning(module):
deprecated_aliases = ['private_token', 'access_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Aliases \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
def deprecation_warning(module):
deprecated_aliases = ['private_token', 'access_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Alias \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
17,943 |
def require(filename: str) -> List[str]:
"""
Allows for composable requirement files with the `-r filename` flag.
"""
reqs = open(f"requirements/{filename}").readlines()
pattern = re.compile(r"^\s*-r\s*(?P<filename>.*)$")
for req in reqs:
match = pattern.match(req)
if match:
reqs.remove(req)
reqs.extend(require(match.group("filename")))
return reqs
|
def load_requirements_from_file(filename: str) -> List[str]:
"""
Allows for composable requirement files with the `-r filename` flag.
"""
reqs = open(f"requirements/{filename}").readlines()
pattern = re.compile(r"^\s*-r\s*(?P<filename>.*)$")
for req in reqs:
match = pattern.match(req)
if match:
reqs.remove(req)
reqs.extend(load_requirements_from_file(match.group("filename")))
return reqs
|
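Both versions above remove entries from `reqs` while iterating over that same list, which can skip a line when two `-r` includes appear back to back. Below is a minimal non-mutating sketch of the same idea, assuming the same `requirements/` directory layout and `-r` include syntax:

import re
from typing import List

_INCLUDE = re.compile(r"^\s*-r\s*(?P<filename>.*)$")

def load_requirements_from_file(filename: str) -> List[str]:
    """Expand `-r other.txt` includes by accumulating into a fresh list."""
    expanded: List[str] = []
    with open(f"requirements/{filename}") as fh:
        for line in fh:
            match = _INCLUDE.match(line)
            if match:
                # Recurse into the included file instead of keeping the `-r` line itself.
                expanded.extend(load_requirements_from_file(match.group("filename")))
            else:
                expanded.append(line)
    return expanded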
35,664 |
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = f"Epoch: [{epoch}]"
lr_scheduler = None
if epoch == 0:
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=warmup_factor, total_iters=warmup_iters
)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
with torch.cuda.amp.autocast(enabled=scaler is not None):
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print(f"Loss is {loss_value}, stopping training")
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
if scaler:
scaler.scale(losses).backward()
scaler.step(optimizer)
scaler.update()
else:
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
|
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = f"Epoch: [{epoch}]"
lr_scheduler = None
if epoch == 0:
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=warmup_factor, total_iters=warmup_iters
)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
with torch.cuda.amp.autocast(enabled=scaler is not None):
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print(f"Loss is {loss_value}, stopping training")
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
if scaler is not None:
scaler.scale(losses).backward()
scaler.step(optimizer)
scaler.update()
else:
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
|
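The only change in this pair is `if scaler:` versus `if scaler is not None:`. For a `GradScaler` the two behave the same in practice (a plain object is truthy), but the explicit None check states the "object or None" contract of the argument. A self-contained sketch of that branch; the toy model, data, and learning rate are made up for illustration:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(4, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# Pass a scaler only when mixed precision is actually in play, otherwise leave it as None.
scaler = torch.cuda.amp.GradScaler() if device == "cuda" else None

x, y = torch.randn(8, 4, device=device), torch.randn(8, 2, device=device)
with torch.cuda.amp.autocast(enabled=scaler is not None):
    loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
if scaler is not None:  # the explicit None check used in the modified version above
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
else:
    loss.backward()
    optimizer.step()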
50,323 |
def test_list_user_starred_projects(gl, resp_starred_projects):
starred_projects = gl.users.get(1).starred_projects.list()[0]
assert isinstance(starred_projects, StarredProject)
assert starred_projects.name == "name"
assert starred_projects.id == 1
|
def test_list_user_starred_projects(user, resp_starred_projects):
starred_projects = user.starred_projects.list()[0]
assert isinstance(starred_projects, StarredProject)
assert starred_projects.name == "name"
assert starred_projects.id == 1
|
48,394 |
def ismount(path):
"""python issue 2466 is fixed then ismount() was rewritten"""
# check python version
current_version = sys.version_info
if (current_version.major == 3 and 5 <= current_version.minor <= 6) or (current_version.major == 2 and current_version.minor == 7):
# clone upstream version of ismount() from Cpython
"""Test whether a path is a mount point"""
try:
s1 = os.lstat(path)
except (OSError, ValueError):
# It doesn't exist -- so not a mount point. :-)
return False
else:
# A symlink can never be a mount point
if os.path.stat.S_ISLNK(s1.st_mode):
return False
if isinstance(path, bytes):
parent = os.path.join(path, b'..')
else:
parent = os.path.join(path, '..')
parent = os.path.realpath(parent)
try:
s2 = os.lstat(parent)
except (OSError, ValueError):
return False
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
else:
try:
s1 = os.lstat(path)
except OSError:
# the OSError should be handled with more care
# it could be a "permission denied" but path is still a mount
return False
else:
# A symlink can never be a mount point
if os.path.stat.S_ISLNK(s1.st_mode):
return False
parent = os.path.join(path, os.path.pardir)
parent = os.path.realpath(parent)
try:
s2 = os.lstat(parent)
except OSError:
# one should handle the returned OSError with more care to figure
# out whether this is still a mount
return False
if s1.st_dev != s2.st_dev:
return True # path/.. on a different device as path
if s1.st_ino == s2.st_ino:
return True # path/.. is the same i-node as path, i.e. path=='/'
return False
|
def ismount(path):
"""Test whether a path is a mount point
This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround
until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python
that may not have the upstream fix.
https://github.com/ansible/ansible-modules-core/issues/2186
http://bugs.python.org/issue2466
"""
# check python version
current_version = sys.version_info
if (current_version.major == 3 and 5 <= current_version.minor <= 6) or (current_version.major == 2 and current_version.minor == 7):
# clone upstream version of ismount() from Cpython
"""Test whether a path is a mount point"""
try:
s1 = os.lstat(path)
except (OSError, ValueError):
# It doesn't exist -- so not a mount point. :-)
return False
else:
# A symlink can never be a mount point
if os.path.stat.S_ISLNK(s1.st_mode):
return False
if isinstance(path, bytes):
parent = os.path.join(path, b'..')
else:
parent = os.path.join(path, '..')
parent = os.path.realpath(parent)
try:
s2 = os.lstat(parent)
except (OSError, ValueError):
return False
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
else:
try:
s1 = os.lstat(path)
except OSError:
# the OSError should be handled with more care
# it could be a "permission denied" but path is still a mount
return False
else:
# A symlink can never be a mount point
if os.path.stat.S_ISLNK(s1.st_mode):
return False
parent = os.path.join(path, os.path.pardir)
parent = os.path.realpath(parent)
try:
s2 = os.lstat(parent)
except OSError:
# one should handle the returned OSError with more care to figure
# out whether this is still a mount
return False
if s1.st_dev != s2.st_dev:
return True # path/.. on a different device as path
if s1.st_ino == s2.st_ino:
return True # path/.. is the same i-node as path, i.e. path=='/'
return False
|
44,826 |
def build_docker_image(work_dir, repository_uri, base_image, run_id, user_env_vars=None):
"""
Build a docker image containing the project in `work_dir`, using the base image.
"""
image_uri = _get_docker_image_uri(repository_uri=repository_uri, work_dir=work_dir)
envs = ""
if user_env_vars:
envs = "ENV " + " ".join([
f"{k}={v}" for k, v in _parse_user_env_vars(user_env_vars).items()])
dockerfile = (
"FROM {imagename}\n"
"{envs}\n"
"COPY {build_context_path}/ {workdir}\n"
"WORKDIR {workdir}\n"
).format(
imagename=base_image,
envs=envs,
build_context_path=_PROJECT_TAR_ARCHIVE_NAME,
workdir=MLFLOW_DOCKER_WORKDIR_PATH,
)
build_ctx_path = _create_docker_build_ctx(work_dir, dockerfile)
with open(build_ctx_path, "rb") as docker_build_ctx:
_logger.info("=== Building docker image %s ===", image_uri)
client = docker.from_env()
image, _ = client.images.build(
tag=image_uri,
forcerm=True,
dockerfile=posixpath.join(_PROJECT_TAR_ARCHIVE_NAME, _GENERATED_DOCKERFILE_NAME),
fileobj=docker_build_ctx,
custom_context=True,
encoding="gzip",
)
try:
os.remove(build_ctx_path)
except Exception:
_logger.info("Temporary docker context file %s was not deleted.", build_ctx_path)
tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_URI, image_uri)
tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_ID, image.id)
return image
|
def build_docker_image(work_dir, repository_uri, base_image, run_id, env_vars=None):
"""
Build a docker image containing the project in `work_dir`, using the base image.
"""
image_uri = _get_docker_image_uri(repository_uri=repository_uri, work_dir=work_dir)
envs = ""
if env_vars:
envs = "ENV " + " ".join([
f"{k}={v}" for k, v in _parse_user_env_vars(env_vars).items()])
dockerfile = (
"FROM {imagename}\n"
"{envs}\n"
"COPY {build_context_path}/ {workdir}\n"
"WORKDIR {workdir}\n"
).format(
imagename=base_image,
envs=envs,
build_context_path=_PROJECT_TAR_ARCHIVE_NAME,
workdir=MLFLOW_DOCKER_WORKDIR_PATH,
)
build_ctx_path = _create_docker_build_ctx(work_dir, dockerfile)
with open(build_ctx_path, "rb") as docker_build_ctx:
_logger.info("=== Building docker image %s ===", image_uri)
client = docker.from_env()
image, _ = client.images.build(
tag=image_uri,
forcerm=True,
dockerfile=posixpath.join(_PROJECT_TAR_ARCHIVE_NAME, _GENERATED_DOCKERFILE_NAME),
fileobj=docker_build_ctx,
custom_context=True,
encoding="gzip",
)
try:
os.remove(build_ctx_path)
except Exception:
_logger.info("Temporary docker context file %s was not deleted.", build_ctx_path)
tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_URI, image_uri)
tracking.MlflowClient().set_tag(run_id, MLFLOW_DOCKER_IMAGE_ID, image.id)
return image
|
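For reference, a small sketch that just renders the Dockerfile template above with made-up values; the archive name and workdir below stand in for `_PROJECT_TAR_ARCHIVE_NAME` and `MLFLOW_DOCKER_WORKDIR_PATH`:

# Hypothetical inputs, only to show the text the template above produces.
base_image = "python:3.9"
envs = "ENV MLFLOW_TRACKING_URI=http://tracking:5000"
dockerfile = (
    "FROM {imagename}\n"
    "{envs}\n"
    "COPY {build_context_path}/ {workdir}\n"
    "WORKDIR {workdir}\n"
).format(
    imagename=base_image,
    envs=envs,
    build_context_path="mlflow-project-contents",  # stand-in for _PROJECT_TAR_ARCHIVE_NAME
    workdir="/mlflow/projects/code",               # stand-in for MLFLOW_DOCKER_WORKDIR_PATH
)
print(dockerfile)
# FROM python:3.9
# ENV MLFLOW_TRACKING_URI=http://tracking:5000
# COPY mlflow-project-contents/ /mlflow/projects/code
# WORKDIR /mlflow/projects/code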
31,458 |
def site_lookup(params):
a = "http://api.screenshotmachine.com"
r = requests.get(a, params=params, allow_redirects=True)
# demisto.results(str(r.status_code))
if r.status_code < 200 or r.status_code >= 300:
return_error_and_exit(
'Failed to update Content.\nURL: {}, Status Code: {}, Response: {}'.format(a, r.status_code, r.text))
return r
|
def site_lookup(params):
a = "http://api.screenshotmachine.com"
r = requests.get(a, params=params, allow_redirects=True)
# demisto.results(str(r.status_code))
if r.status_code < 200 or r.status_code >= 300:
return_error(
'Failed to update Content.\nURL: {}, Status Code: {}, Response: {}'.format(a, r.status_code, r.text))
return r
|
31,355 |
def get_policy_command(client: Client, args: dict):
policy_id = args.get('policyId')
headers = ["id", "description", "name", "latestRevision", "version", "priorityLevel", "systemPolicy"]
if not policy_id:
return "Missing required arguments."
res = client.get_policy_by_id(policy_id)
policy_info = dict(res.get('policyInfo'))
if not policy_info:
return "Policy not found."
del policy_info['policy']
policy_info['latestRevision'] = timestamp_to_datestring(policy_info['latestRevision'])
readable_output = tableToMarkdown('Carbon Black Defense Policy',
policy_info,
headers=headers,
headerTransform=pascalToSpace,
removeNull=True)
return CommandResults(
outputs_prefix='CarbonBlackDefense.Policy',
outputs_key_field='id',
outputs=res.get('policyInfo'),
readable_output=readable_output,
raw_response=res
)
|
def get_policy_command(client: Client, args: dict):
policy_id = args.get('policyId')
headers = ["id", "description", "name", "latestRevision", "version", "priorityLevel", "systemPolicy"]
if not policy_id:
return "Missing required arguments."
res = client.get_policy_by_id(policy_id)
policy_info = dict(res.get('policyInfo'))
if not policy_info:
return "Policy not found."
del policy_info['policy']
policy_info['latestRevision'] = timestamp_to_datestring(policy_info.get('latestRevision', ''))
readable_output = tableToMarkdown('Carbon Black Defense Policy',
policy_info,
headers=headers,
headerTransform=pascalToSpace,
removeNull=True)
return CommandResults(
outputs_prefix='CarbonBlackDefense.Policy',
outputs_key_field='id',
outputs=res.get('policyInfo'),
readable_output=readable_output,
raw_response=res
)
|
10,829 |
def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
It uses highlevel type info to determine if the function does not need NRT.
Such a function does not:
- return array object;
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
the lifetime of any refcounted objects beyound the lifetime of the
function.
The rewrite performs inplace.
If rewrite has happen, this function return True. Otherwise, return False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
|
def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
It uses highlevel type info to determine if the function does not need NRT.
Such a function does not:
- return array object(s);
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
the lifetime of any refcounted objects beyond the lifetime of the
function.
The rewrite is performed in place.
If a rewrite happened, this function returns True. Otherwise, it returns False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
|
45,510 |
def get_channels_data(api_token: str) -> typing.List[typing.Mapping[str, str]]:
"""
Returns a dictionary with channel_name and channel_id of non archived
public channels.
"""
client = get_client(api_token)
response = client.conversations_list(exclude_archived=True)
conversations = response["channels"]
channel_data = [
{"channel_name": channel["name"], "channel_id": channel["id"]}
for channel in conversations
]
return channel_data
|
def get_channels_data(api_token: str) -> typing.List[typing.Mapping[str, str]]:
"""
Returns a dictionary with channel_name and channel_id of non archived
public channels.
"""
client = get_client(api_token)
response = client.conversations_list(exclude_archived=True)
channels = response["channels"]
channel_data = [
{"channel_name": channel["name"], "channel_id": channel["id"]}
for channel in channels
]
return channel_data
|
27,736 |
def locate_config(
args: Iterable[Union[str, Path]],
) -> Tuple[
Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]],
]:
"""Search in the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict)."""
config_names = [
"pytest.ini",
"pyproject.toml",
"tox.ini",
"setup.cfg",
]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [Path.cwd()]
for arg in args:
argpath = Path(arg).resolve()
for base in itertools.chain((argpath,), reversed(argpath.parents)):
for config_name in config_names:
p = base / config_name
if p.is_file():
ini_config = load_config_dict_from_file(p)
if ini_config is not None:
return base, p, ini_config
return None, None, {}
|
def locate_config(
args: Iterable[Union[str, Path]],
) -> Tuple[
Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]],
]:
"""Search in the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict)."""
config_names = [
"pytest.ini",
"pyproject.toml",
"tox.ini",
"setup.cfg",
]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [Path.cwd()]
for arg in args:
argpath = Path(arg).absolute()
for base in itertools.chain((argpath,), reversed(argpath.parents)):
for config_name in config_names:
p = base / config_name
if p.is_file():
ini_config = load_config_dict_from_file(p)
if ini_config is not None:
return base, p, ini_config
return None, None, {}
|
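The only change here is `Path.resolve()` versus `Path.absolute()`: `absolute()` just anchors the path to the current working directory, while `resolve()` also normalizes `..` components and follows symlinks. A small illustration with a hypothetical relative path:

from pathlib import Path

p = Path("pkg/..")
print(p.absolute())  # e.g. /home/user/project/pkg/..  (anchored, not normalized)
print(p.resolve())   # e.g. /home/user/project          (normalized, symlinks followed)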
42,935 |
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> None:
""" Creates a plot.ly plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): list of nodes comprising the subgraph to highlight
size (dict): size of the plot
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> go.Figure:
""" Creates a plot.ly plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): list of nodes comprising the subgraph to highlight
size (float): size of the plot in pixels, rendered in a square layout
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
46,531 |
def hash_tree_root(value, typ=None):
if typ is None:
typ = infer_type(value)
chunks = pack_object(value, typ)
if (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes':
return mix_in_length(merkleize(chunks), len(value))
else:
if isinstance(typ, list):
assert len(value) == typ[1]
elif typ[:5] == 'bytes':
assert len(value) == int(typ[5:])
return merkleize(chunks)
|
def hash_tree_root(value, typ=None):
if typ is None:
typ = infer_type(value)
chunks = pack_object(value, typ)
if (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes':
return mix_in_length(merkleize(chunks), len(value))
else:
if isinstance(typ, list):
assert len(value) == typ[1]
elif isinstance(typ, str) and typ[:5] == 'bytes':
assert len(value) == int(typ[5:])
return merkleize(chunks)
|
58,147 |
def make_api_request(url: str, method: str, data: Optional[dict] = None, parameters: Optional[dict] = None) -> Union[requests.Response, Response]:
'''
Make request to api endpoint
Args:
url (str): endpoint to make request
method (str): GET, POST action to perform
data (Optional[Dict]): Body to be sent to service
parameters (Optional[Dict]): Query parameters to be sent to service
Returns:
requests.Response object (For returning raw API response object) or Response object (For handling errors in integration configuration)
'''
http_basic_creds_to_pass = None
headers_to_pass = None
if API_CRED_TYPE:
if API_CRED_TYPE == 'Basic':
http_basic_creds_to_pass = (API_CREDENTIALS.get('identifier'), API_CREDENTIALS.get('password'))
elif API_CRED_TYPE == 'Bearer Token':
headers_to_pass = {
"Authorization": f"Bearer {API_CREDENTIALS.get('password')}"
}
elif API_CRED_TYPE == 'Custom Header':
if not API_CUSTOM_HEADER:
return Response(status_code=200, content="Custom Header is not set in integration configuration and Credential Type is selected as 'Custom Header'")
elif not API_CUSTOM_HEADER_VALUE:
return Response(status_code=200, content="Custom Header value is not set in integration configuration and Credential Type is selected as 'Custom Header'")
headers_to_pass = {
API_CUSTOM_HEADER: API_CUSTOM_HEADER_VALUE
}
# json stringify if dict
isjson = False
if isinstance(data, dict):
# data = json.dumps(data)
isjson = True
if method.upper() == "GET":
if isjson:
response = requests.get(url, json=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
else:
response = requests.get(url, data=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
elif method.upper() == "POST":
if isjson:
response = requests.post(url, json=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
else:
response = requests.post(url, data=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
elif method.upper() == "PUT":
if isjson:
response = requests.put(url, json=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
else:
response = requests.put(url, data=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
elif method.upper() == "DELETE":
if isjson:
response = requests.delete(url, json=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
else:
response = requests.delete(url, data=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
elif method.upper() == "HEAD":
if isjson:
response = requests.head(url, json=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
else:
response = requests.head(url, data=data, params=parameters, auth=http_basic_creds_to_pass,
headers=headers_to_pass, verify=False)
demisto.debug(f'Requests Request Headers: {response.request.headers}')
demisto.debug(f'Requests Response: {response.text}')
return response
|
def make_api_request(url: str, method: str, data: Optional[dict] = None, parameters: Optional[dict] = None) -> Union[requests.Response, Response]:
'''
Make request to api endpoint
Args:
url (str): endpoint to make request
method (str): GET, POST action to perform
data (Optional[Dict]): Body to be sent to service
parameters (Optional[Dict]): Query parameters to be sent to service
Returns:
requests.Response object (For returning raw API response object) or Response object (For handling errors in integration configuration)
'''
http_basic_creds_to_pass = None
headers_to_pass = None
if API_CRED_TYPE:
if API_CRED_TYPE == 'Basic':
http_basic_creds_to_pass = (API_CREDENTIALS.get('identifier'), API_CREDENTIALS.get('password'))
elif API_CRED_TYPE == 'Bearer Token':
headers_to_pass = {
"Authorization": f"Bearer {API_CREDENTIALS.get('password')}"
}
elif API_CRED_TYPE == 'Custom Header':
if not API_CUSTOM_HEADER:
return Response(status_code=200, content="Custom Header is not set in integration configuration and Credential Type is selected as 'Custom Header'")
elif not API_CUSTOM_HEADER_VALUE:
return Response(status_code=200, content="Custom Header value is not set in integration configuration and Credential Type is selected as 'Custom Header'")
headers_to_pass = {
API_CUSTOM_HEADER: API_CUSTOM_HEADER_VALUE
}
# json stringify if dict
isjson = False
if isinstance(data, dict):
# data = json.dumps(data)
isjson = True
if isjson:
response = requests.request(method.upper(), url, json=data, params=parameters, auth=http_basic_creds_to_pass, headers=headers_to_pass, verify=False)
else:
response = requests.request(method.upper(), url, data=data, params=parameters, auth=http_basic_creds_to_pass, headers=headers_to_pass, verify=False)
demisto.debug(f'Requests Request Headers: {response.request.headers}')
demisto.debug(f'Requests Response: {response.text}')
return response
|
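The modified version collapses the per-verb branches into `requests.request`, the generic entry point that the verb-specific helpers like `requests.get` delegate to, so behaviour is unchanged. A quick equivalence sketch (the URL is just an example):

import requests

params = {"q": "demo"}
r1 = requests.get("https://httpbin.org/get", params=params)
r2 = requests.request("GET", "https://httpbin.org/get", params=params)
assert r1.status_code == r2.status_code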
58,087 |
def main() -> None:
"""Main function, parses params and runs command functions."""
ip = demisto.params().get("ip")
token = demisto.params().get("token", None)
user = demisto.params().get("credentials", {}).get("identifier", None)
password = demisto.params().get("credentials", {}).get("password", None)
check_cert = demisto.params().get("check_cert", False)
demisto.debug(f"Command being called is {demisto.command()}")
try:
client = GwClient(ip=ip, check_cert=check_cert)
client.auth(
user=user if user != "" else None,
password=password if password != "" else None,
token=token
)
if demisto.command() == "test-module":
return_results(
test_module(client=client)
)
elif demisto.command() == "gw-list-alerts":
return_results(
gw_list_alerts(client=client, args=demisto.args())
)
elif demisto.command() == "gw-get-alert":
return_results(
gw_get_alert(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-malcore-list-entry":
return_results(
gw_add_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-malcore-list-entry":
return_results(
gw_del_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-dga-list-entry":
return_results(
gw_add_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-dga-list-entry":
return_results(
gw_del_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-es-query":
return_results(
gw_es_query(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-asset-name":
return_results(
gw_add_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-ip":
return_results(
gw_add_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-name":
return_results(
gw_add_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-mac-address":
return_results(
gw_add_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-asset-name":
return_results(
gw_del_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-ip":
return_results(
gw_del_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-name":
return_results(
gw_del_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-mac-address":
return_results(
gw_del_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-malware":
return_results(
gw_send_malware(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-powershell":
return_results(
gw_send_powershell(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-shellcode":
return_results(
gw_send_shellcode(client=client, args=demisto.args())
)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(
f"Failed to execute {demisto.command()} command.\nError: {str(e)}"
)
|
def main() -> None:
"""Main function, parses params and runs command functions."""
ip = demisto.params().get("ip")
token = demisto.params().get("token", None)
user = demisto.params().get("credentials", {}).get("identifier", None)
password = demisto.params().get("credentials", {}).get("password", None)
check_cert = demisto.params().get("check_cert", False)
command = demisto.command()
args = demisto.args()
demisto.debug(f"Command being called is {demisto.command()}")
try:
client = GwClient(ip=ip, check_cert=check_cert)
client.auth(
user=user if user != "" else None,
password=password if password != "" else None,
token=token
)
if demisto.command() == "test-module":
return_results(
test_module(client=client)
)
elif demisto.command() == "gw-list-alerts":
return_results(
gw_list_alerts(client=client, args=demisto.args())
)
elif demisto.command() == "gw-get-alert":
return_results(
gw_get_alert(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-malcore-list-entry":
return_results(
gw_add_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-malcore-list-entry":
return_results(
gw_del_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-dga-list-entry":
return_results(
gw_add_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-dga-list-entry":
return_results(
gw_del_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-es-query":
return_results(
gw_es_query(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-asset-name":
return_results(
gw_add_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-ip":
return_results(
gw_add_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-name":
return_results(
gw_add_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-mac-address":
return_results(
gw_add_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-asset-name":
return_results(
gw_del_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-ip":
return_results(
gw_del_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-name":
return_results(
gw_del_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-mac-address":
return_results(
gw_del_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-malware":
return_results(
gw_send_malware(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-powershell":
return_results(
gw_send_powershell(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-shellcode":
return_results(
gw_send_shellcode(client=client, args=demisto.args())
)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(
f"Failed to execute {demisto.command()} command.\nError: {str(e)}"
)
|
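The modified `main()` pulls `command` and `args` into local variables; taking that refactor one step further, the long if/elif chain could become a dispatch table. The sketch below is an alternative, not what the integration actually does, and it assumes the `gw_*` handlers defined elsewhere in the integration are in scope:

COMMANDS = {
    "gw-list-alerts": gw_list_alerts,
    "gw-get-alert": gw_get_alert,
    "gw-es-query": gw_es_query,
    # ... remaining gw-* handlers registered the same way
}

def dispatch(client, command: str, args: dict):
    if command == "test-module":
        return_results(test_module(client=client))
    elif command in COMMANDS:
        return_results(COMMANDS[command](client=client, args=args))
    else:
        raise NotImplementedError(f"Command {command} is not implemented")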
45,646 |
def bool_converter(s):
"""Convert variable to boolean
:param s: a variable
:return: the return of the builtin bool() function except if the variable is equal to a str
representation of the boolean value
"""
answer = bool(s)
if isinstance(s, str):
if s in ('False', 'false', '0'):
answer = False
return answer
|
def bool_converter(s):
"""Convert variable to boolean
:param s: a variable
:return: the boolean value of the variable; the strings 'False', 'false' and '0' are treated as False
"""
answer = bool(s)
if isinstance(s, str):
if s in ('False', 'false', '0'):
answer = False
return answer
|
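A few assertions spelling out the behaviour of `bool_converter` as written above, including the caveat that only those three string spellings are treated as False:

assert bool_converter(0) is False
assert bool_converter("") is False        # empty string is falsy to begin with
assert bool_converter("false") is False   # recognised string spelling of False
assert bool_converter("0") is False
assert bool_converter("no") is True       # any other non-empty string stays truthy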
36,328 |
def serialize_schema(schema):
schema_type = type(schema)
# --------------------------------------------------------------- #
# Class
# --------------------------------------------------------------- #
if schema_type is type:
if issubclass(schema, Field):
return schema().serialize()
elif schema is dict:
return Dictionary().serialize()
elif schema is list:
return List().serialize()
elif schema is int:
return Integer().serialize()
elif schema is float:
return Float().serialize()
elif schema is str:
return String().serialize()
elif schema is bool:
return Boolean().serialize()
elif schema is date:
return Date().serialize()
elif schema is datetime:
return DateTime().serialize()
elif schema is UUID:
return OAPIUUID().serialize()
else:
return Object(schema).serialize()
# --------------------------------------------------------------- #
# Object
# --------------------------------------------------------------- #
else:
if issubclass(schema_type, Field):
return schema.serialize()
elif schema_type is dict:
return Dictionary(schema).serialize()
elif schema_type is list:
return List(schema).serialize()
return {}
|
def serialize_schema(schema):
schema_type = type(schema)
# --------------------------------------------------------------- #
# Class
# --------------------------------------------------------------- #
if schema_type is type:
if issubclass(schema, Field):
return schema().serialize()
elif schema is dict:
return Dictionary().serialize()
elif schema is list:
return List().serialize()
elif schema is int:
return Integer().serialize()
elif schema is float:
return Float().serialize()
elif schema is str:
return String().serialize()
elif schema is bool:
return Boolean().serialize()
elif schema is date:
return Date().serialize()
elif schema is datetime:
return DateTime().serialize()
elif schema is uuid.UUID:
return OAPIUUID().serialize()
else:
return Object(schema).serialize()
# --------------------------------------------------------------- #
# Object
# --------------------------------------------------------------- #
else:
if issubclass(schema_type, Field):
return schema.serialize()
elif schema_type is dict:
return Dictionary(schema).serialize()
elif schema_type is list:
return List(schema).serialize()
return {}
|
26,444 |
def _unmarshaling_method_array(swagger_spec, object_schema):
# type: (Spec, JSONDict) -> UnmarshalingMethod
"""
Determine the unmarshaling method needed for a schema of a type array.
The method will be responsible for the identification of the unmarshaling method of the array items.
:param swagger_spec: Spec object
:param object_schema: Schema of the object type
"""
item_schema = swagger_spec.deref(swagger_spec.deref(object_schema).get('items', _NOT_FOUND))
if item_schema is _NOT_FOUND:
return _no_op_unmarshaling
return partial(
_unmarshal_array,
_get_unmarshaling_method(swagger_spec, item_schema),
)
|
def _unmarshaling_method_array(swagger_spec, object_schema):
# type: (Spec, JSONDict) -> UnmarshalingMethod
"""
Determine the unmarshaling method needed for a schema of a type array.
The method will be responsible for the identification of the unmarshaling method of the array items.
:param swagger_spec: Spec object
:param object_schema: Schema of the object type
"""
item_schema = swagger_spec.deref(swagger_spec.deref(object_schema).get('items', _NOT_FOUND))
if item_schema is _NOT_FOUND:
return _no_op_unmarshaling
return partial(
_unmarshal_array,
_get_unmarshaling_method(swagger_spec=swagger_spec, object_schema=item_schema),
)
|
37,046 |
def _transpilation(circuit, basis_gates=None, coupling_map=None,
initial_layout=None, seed_mapper=None,
pass_manager=None):
"""Perform transpilation of a single circuit.
Args:
circuit (QuantumCircuit): A circuit to transpile.
basis_gates (list[str]): list of basis gate names supported by the
target. Default: ['u1','u2','u3','cx','id']
coupling_map (CouplingMap): coupling map (perhaps custom) to target in mapping
initial_layout (Layout): initial layout of qubits in mapping
seed_mapper (int): random seed for the swap_mapper
pass_manager (PassManager): a pass_manager for the transpiler stage
Returns:
QuantumCircuit: A transpiled circuit.
Raises:
TranspilerError: if args are not complete for transpiler to function.
"""
if initial_layout is not None and set(circuit.qregs) != initial_layout.get_registers():
raise TranspilerError('The provided initial layout does not matches the registers in '
'the circuit "%s"' % circuit.name)
if pass_manager and not pass_manager.working_list:
return circuit
dag = circuit_to_dag(circuit)
del circuit
# if the circuit and layout already satisfy the coupling_constraints, use that layout
# if there's no layout but the circuit is compatible, use a trivial layout
# otherwise layout on the most densely connected physical qubit subset
# FIXME: this should be simplified once it is ported to a PassManager
if coupling_map:
cm_object = CouplingMap(coupling_map)
check_map = CheckMap(cm_object, initial_layout)
check_map.run(dag)
if check_map.property_set['is_swap_mapped']:
if not initial_layout:
trivial_layout = TrivialLayout(cm_object)
trivial_layout.run(dag)
initial_layout = trivial_layout.property_set['layout']
else:
dense_layout = DenseLayout(cm_object)
dense_layout.run(dag)
initial_layout = dense_layout.property_set['layout']
final_dag = transpile_dag(dag, basis_gates=basis_gates,
coupling_map=coupling_map,
initial_layout=initial_layout,
seed_mapper=seed_mapper,
pass_manager=pass_manager)
out_circuit = dag_to_circuit(final_dag)
return out_circuit
|
def _transpilation(circuit, basis_gates=None, coupling_map=None,
initial_layout=None, seed_mapper=None,
pass_manager=None):
"""Perform transpilation of a single circuit.
Args:
circuit (QuantumCircuit): A circuit to transpile.
basis_gates (list[str]): list of basis gate names supported by the
target. Default: ['u1','u2','u3','cx','id']
coupling_map (CouplingMap): coupling map (perhaps custom) to target in mapping
initial_layout (Layout): initial layout of qubits in mapping
seed_mapper (int): random seed for the swap_mapper
pass_manager (PassManager): a pass_manager for the transpiler stage
Returns:
QuantumCircuit: A transpiled circuit.
Raises:
TranspilerError: if args are not complete for transpiler to function.
"""
if initial_layout is not None and set(circuit.qregs) != initial_layout.get_registers():
raise TranspilerError('The provided initial layout does not match the registers in '
'the circuit "%s"' % circuit.name)
if pass_manager and not pass_manager.working_list:
return circuit
dag = circuit_to_dag(circuit)
del circuit
# if the circuit and layout already satisfy the coupling_constraints, use that layout
# if there's no layout but the circuit is compatible, use a trivial layout
# otherwise layout on the most densely connected physical qubit subset
# FIXME: this should be simplified once it is ported to a PassManager
if coupling_map:
cm_object = CouplingMap(coupling_map)
check_map = CheckMap(cm_object, initial_layout)
check_map.run(dag)
if check_map.property_set['is_swap_mapped']:
if not initial_layout:
trivial_layout = TrivialLayout(cm_object)
trivial_layout.run(dag)
initial_layout = trivial_layout.property_set['layout']
else:
dense_layout = DenseLayout(cm_object)
dense_layout.run(dag)
initial_layout = dense_layout.property_set['layout']
final_dag = transpile_dag(dag, basis_gates=basis_gates,
coupling_map=coupling_map,
initial_layout=initial_layout,
seed_mapper=seed_mapper,
pass_manager=pass_manager)
out_circuit = dag_to_circuit(final_dag)
return out_circuit
|
7,391 |
def _moments_raw_to_central_fast(moments_raw):
"""Analytical equations for 2D and 3D central moments with order < 4.
`moments_raw_to_central` will automatically call this function when
possible.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
"""
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
float_dtype = moments_raw.dtype
# convert to float64 during the computation for better accuracy
moments_raw = moments_raw.astype(np.float64, copy=False)
moments_central = np.zeros_like(moments_raw)
if order >= 4 or ndim not in [2, 3]:
raise ValueError(
"This function only supports 2D or 3D moments with order < 4"
)
m = moments_raw
if ndim == 2:
cx = m[1, 0] / m[0, 0]
cy = m[0, 1] / m[0, 0]
moments_central[0, 0] = m[0, 0]
# Note: 1st order moments are both 0
if order > 1:
# 2nd order moments
moments_central[1, 1] = m[1, 1] - cx*m[0, 1]
moments_central[2, 0] = m[2, 0] - cx*m[1, 0]
moments_central[0, 2] = m[0, 2] - cy*m[0, 1]
if order > 2:
# 3rd order moments
moments_central[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0]
+ cx**2*m[0, 1] + cy*cx*m[1, 0])
moments_central[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2]
+ 2*cy*cx*m[0, 1])
moments_central[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx**2*m[1, 0]
moments_central[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy**2*m[0, 1]
else:
# 3D case
cx = m[1, 0, 0] / m[0, 0, 0]
cy = m[0, 1, 0] / m[0, 0, 0]
cz = m[0, 0, 1] / m[0, 0, 0]
moments_central[0, 0, 0] = m[0, 0, 0]
# Note: all first order moments are 0
if order > 1:
# 2nd order moments
moments_central[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2]
moments_central[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1]
moments_central[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0]
moments_central[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1]
moments_central[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0]
moments_central[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0]
if order > 2:
# 3rd order moments
moments_central[0, 0, 3] = (2*cz**2*m[0, 0, 1]
- 3*cz*m[0, 0, 2]
+ m[0, 0, 3])
moments_central[0, 1, 2] = (-cy*m[0, 0, 2]
+ 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1])
+ m[0, 1, 2])
moments_central[0, 2, 1] = (cy**2*m[0, 0, 1] - 2*cy*m[0, 1, 1]
+ cz*(cy*m[0, 1, 0] - m[0, 2, 0])
+ m[0, 2, 1])
moments_central[0, 3, 0] = (2*cy**2*m[0, 1, 0]
- 3*cy*m[0, 2, 0]
+ m[0, 3, 0])
moments_central[1, 0, 2] = (-cx*m[0, 0, 2]
+ 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1])
+ m[1, 0, 2])
moments_central[1, 1, 1] = (-cx*m[0, 1, 1]
+ cy*(cx*m[0, 0, 1] - m[1, 0, 1])
+ cz*(cx*m[0, 1, 0] - m[1, 1, 0])
+ m[1, 1, 1])
moments_central[1, 2, 0] = (-cx*m[0, 2, 0]
- 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0])
+ m[1, 2, 0])
moments_central[2, 0, 1] = (cx**2*m[0, 0, 1]
- 2*cx*m[1, 0, 1]
+ cz*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 0, 1])
moments_central[2, 1, 0] = (cx**2*m[0, 1, 0]
- 2*cx*m[1, 1, 0]
+ cy*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 1, 0])
moments_central[3, 0, 0] = (2*cx**2*m[1, 0, 0]
- 3*cx*m[2, 0, 0]
+ m[3, 0, 0])
return moments_central.astype(float_dtype, copy=False)
|
def _moments_raw_to_central_fast(moments_raw):
"""Analytical equations for 2D and 3D central moments with order < 4.
`moments_raw_to_central` will automatically call this function when
possible.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
"""
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
float_dtype = moments_raw.dtype
# convert to float64 during the computation for better accuracy
moments_raw = moments_raw.astype(np.float64, copy=False)
moments_central = np.zeros_like(moments_raw)
if order >= 4 or ndim not in [2, 3]:
raise ValueError(
"This function only supports 2D or 3D moments of order < 4."
)
m = moments_raw
if ndim == 2:
cx = m[1, 0] / m[0, 0]
cy = m[0, 1] / m[0, 0]
moments_central[0, 0] = m[0, 0]
# Note: 1st order moments are both 0
if order > 1:
# 2nd order moments
moments_central[1, 1] = m[1, 1] - cx*m[0, 1]
moments_central[2, 0] = m[2, 0] - cx*m[1, 0]
moments_central[0, 2] = m[0, 2] - cy*m[0, 1]
if order > 2:
# 3rd order moments
moments_central[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0]
+ cx**2*m[0, 1] + cy*cx*m[1, 0])
moments_central[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2]
+ 2*cy*cx*m[0, 1])
moments_central[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx**2*m[1, 0]
moments_central[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy**2*m[0, 1]
else:
# 3D case
cx = m[1, 0, 0] / m[0, 0, 0]
cy = m[0, 1, 0] / m[0, 0, 0]
cz = m[0, 0, 1] / m[0, 0, 0]
moments_central[0, 0, 0] = m[0, 0, 0]
# Note: all first order moments are 0
if order > 1:
# 2nd order moments
moments_central[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2]
moments_central[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1]
moments_central[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0]
moments_central[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1]
moments_central[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0]
moments_central[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0]
if order > 2:
# 3rd order moments
moments_central[0, 0, 3] = (2*cz**2*m[0, 0, 1]
- 3*cz*m[0, 0, 2]
+ m[0, 0, 3])
moments_central[0, 1, 2] = (-cy*m[0, 0, 2]
+ 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1])
+ m[0, 1, 2])
moments_central[0, 2, 1] = (cy**2*m[0, 0, 1] - 2*cy*m[0, 1, 1]
+ cz*(cy*m[0, 1, 0] - m[0, 2, 0])
+ m[0, 2, 1])
moments_central[0, 3, 0] = (2*cy**2*m[0, 1, 0]
- 3*cy*m[0, 2, 0]
+ m[0, 3, 0])
moments_central[1, 0, 2] = (-cx*m[0, 0, 2]
+ 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1])
+ m[1, 0, 2])
moments_central[1, 1, 1] = (-cx*m[0, 1, 1]
+ cy*(cx*m[0, 0, 1] - m[1, 0, 1])
+ cz*(cx*m[0, 1, 0] - m[1, 1, 0])
+ m[1, 1, 1])
moments_central[1, 2, 0] = (-cx*m[0, 2, 0]
- 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0])
+ m[1, 2, 0])
moments_central[2, 0, 1] = (cx**2*m[0, 0, 1]
- 2*cx*m[1, 0, 1]
+ cz*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 0, 1])
moments_central[2, 1, 0] = (cx**2*m[0, 1, 0]
- 2*cx*m[1, 1, 0]
+ cy*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 1, 0])
moments_central[3, 0, 0] = (2*cx**2*m[1, 0, 0]
- 3*cx*m[2, 0, 0]
+ m[3, 0, 0])
return moments_central.astype(float_dtype, copy=False)
|
57,915 |
def redlock_get_scan_results():
"""
Get DevOps Scan Results
"""
scan_id = demisto.args().get('scan_id', None)
response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
readable_output.append({
"ID": item.get('id'),
"Name": item.get('attributes')['name'],
"Policy ID": item.get('attributes')['policyId'],
"Description": item.get('attributes')['desc'],
"Severity": item.get('attributes')['severity']
})
results = {
"id": scan_id,
"results": items
}
md = tableToMarkdown("Scan Results:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': results,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': results},
'HumanReadable': md
})
|
def redlock_get_scan_results():
"""
Get DevOps Scan Results
"""
scan_id = demisto.args().get('scan_id', None)
response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
id = item.get('id')
attributes = item.get('attributes', {})
readable_output.append({
"ID": id,
"Name": attributes.get('name'),
"Policy ID": attributes.get('policyId'),
"Description": attributes.get('desc'),
"Severity": attributes.get('severity')
})
results = {
"id": scan_id,
"results": items
}
md = tableToMarkdown("Scan Results:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': results,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': results},
'HumanReadable': md
})
|
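The change in this pair swaps direct subscripting of `attributes` for chained `.get()` calls, trading a `KeyError` for `None` fields when a result is missing data. A tiny illustration with a made-up item:

item = {"id": "123"}                           # hypothetical result without 'attributes'
# item["attributes"]["name"]                   # would raise KeyError
name = item.get("attributes", {}).get("name")  # returns None instead
print(name)                                    # None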
42,106 |
def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = np.inf
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = -np.inf
elif np.isnan(r.intermediate_value):
value = np.nan
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
|
def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = np.inf
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = float("-inf")
elif np.isnan(r.intermediate_value):
value = np.nan
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
|
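The only value-level change above is `-np.inf` becoming `float("-inf")`; the two spellings denote the same IEEE 754 value, so the migration behaves identically:

import numpy as np

assert float("-inf") == -np.inf
assert np.isneginf(float("-inf"))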
35,175 |
def lstsq(a, b, rcond='warn'):
"""Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Args:
a (cupy.ndarray): "Coefficient" matrix with dimension ``(M, N)``
b (cupy.ndarray): "Dependent variable" values with dimension ``(M,)``
or ``(M, K)``
rcond (float): Cutoff parameter for small singular values.
For stability it computes the largest singular value denoted by
``s``, and sets all singular values smaller than ``s`` to zero.
Returns:
tuple:
A tuple of ``(x, residuals, rank, s)``. Note ``x`` is the
least-squares solution with shape ``(N,)`` or ``(N, K)`` depending
if ``b`` was two-dimensional. The sums of ``residuals`` is the
squared Euclidean 2-norm for each column in b - a*x. The
``residuals`` is an empty array if the rank of a is < N or M <= N,
but iff b is 1-dimensional, this is a (1,) shape array, Otherwise
the shape is (K,). The ``rank`` of matrix ``a`` is an integer. The
singular values of ``a`` are ``s``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.lstsq`
"""
if rcond == 'warn':
warnings.warn(
'`rcond` parameter will change to the default of '
'machine precision times ``max(M, N)`` where M and N '
'are the input matrix dimensions.\n'
'To use the future default and silence this warning '
'we advise to pass `rcond=None`.\n'
'Using the old CuPy default: `rcond=1e-15`, which is '
'neither the same as the old NumPy default (`rcond=-1`).',
FutureWarning)
rcond = 1e-15
_util._assert_cupy_array(a, b)
_util._assert_rank2(a)
if b.ndim > 2:
raise linalg.LinAlgError('{}-dimensional array given. Array must be at'
' most two-dimensional'.format(b.ndim))
m, n = a.shape[-2:]
m2 = b.shape[0]
if m != m2:
raise linalg.LinAlgError('Incompatible dimensions')
u, s, vt = cupy.linalg.svd(a, full_matrices=False)
if rcond is None:
rcond = numpy.finfo(s.dtype).eps * max(m, n)
# number of singular values and matrix rank
cutoff = rcond * s.max()
s1 = 1 / s
sing_vals = s <= cutoff
s1[sing_vals] = 0
rank = s.size - sing_vals.sum()
if b.ndim == 2:
s1 = cupy.repeat(s1.reshape(-1, 1), b.shape[1], axis=1)
# Solve the least-squares solution
z = core.dot(u.transpose(), b) * s1
x = core.dot(vt.transpose(), z)
# Calculate squared Euclidean 2-norm for each column in b - a*x
if rank != n or m <= n:
resids = cupy.array([], dtype=a.dtype)
elif b.ndim == 2:
e = b - core.dot(a, x)
resids = cupy.sum(cupy.square(e), axis=0)
else:
e = b - cupy.dot(a, x)
resids = cupy.dot(e.T, e).reshape(-1)
return x, resids, rank, s
|
def lstsq(a, b, rcond='warn'):
"""Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Args:
a (cupy.ndarray): "Coefficient" matrix with dimension ``(M, N)``
b (cupy.ndarray): "Dependent variable" values with dimension ``(M,)``
or ``(M, K)``
rcond (float): Cutoff parameter for small singular values.
For stability it computes the largest singular value denoted by
``s``, and sets all singular values smaller than ``s`` to zero.
Returns:
tuple:
A tuple of ``(x, residuals, rank, s)``. Note ``x`` is the
least-squares solution with shape ``(N,)`` or ``(N, K)`` depending
if ``b`` was two-dimensional. The sums of ``residuals`` is the
squared Euclidean 2-norm for each column in b - a*x. The
``residuals`` is an empty array if the rank of a is < N or M <= N,
but iff b is 1-dimensional, this is a (1,) shape array, Otherwise
the shape is (K,). The ``rank`` of matrix ``a`` is an integer. The
singular values of ``a`` are ``s``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.lstsq`
"""
if rcond == 'warn':
warnings.warn(
'`rcond` parameter will change to the default of '
'machine precision times ``max(M, N)`` where M and N '
'are the input matrix dimensions.\n'
'To use the future default and silence this warning '
'we advise to pass `rcond=None`.\n'
'Using the old CuPy default: `rcond=1e-15`, which is '
            'not the same as the old NumPy default (`rcond=-1`).',
FutureWarning)
rcond = -1
_util._assert_cupy_array(a, b)
_util._assert_rank2(a)
if b.ndim > 2:
raise linalg.LinAlgError('{}-dimensional array given. Array must be at'
' most two-dimensional'.format(b.ndim))
m, n = a.shape[-2:]
m2 = b.shape[0]
if m != m2:
raise linalg.LinAlgError('Incompatible dimensions')
u, s, vt = cupy.linalg.svd(a, full_matrices=False)
if rcond is None:
rcond = numpy.finfo(s.dtype).eps * max(m, n)
# number of singular values and matrix rank
cutoff = rcond * s.max()
s1 = 1 / s
sing_vals = s <= cutoff
s1[sing_vals] = 0
rank = s.size - sing_vals.sum()
if b.ndim == 2:
s1 = cupy.repeat(s1.reshape(-1, 1), b.shape[1], axis=1)
# Solve the least-squares solution
z = core.dot(u.transpose(), b) * s1
x = core.dot(vt.transpose(), z)
# Calculate squared Euclidean 2-norm for each column in b - a*x
if rank != n or m <= n:
resids = cupy.array([], dtype=a.dtype)
elif b.ndim == 2:
e = b - core.dot(a, x)
resids = cupy.sum(cupy.square(e), axis=0)
else:
e = b - cupy.dot(a, x)
resids = cupy.dot(e.T, e).reshape(-1)
return x, resids, rank, s
|
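For orientation on the pair above, a least-squares routine with this interface is typically exercised as in the NumPy sketch below; the data points are invented for the illustration, and `cupy.linalg.lstsq` accepts the same arguments with CuPy arrays.
import numpy as np
# Fit y = m*x + c to a few points (values invented for the example).
x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([-1.0, 0.2, 0.9, 2.1])
A = np.vstack([x, np.ones_like(x)]).T   # design matrix: one column per unknown
coeffs, residuals, rank, s = np.linalg.lstsq(A, y, rcond=None)
m, c = coeffs
print(m, c)        # fitted slope and intercept
print(residuals)   # (1,) array: y is 1-D and A has full column rank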
41,397 |
def package_directory(dest_folder, classes, imagery, ml_type, seed=False, split_names=['train', 'test'],
split_vals=[0.8, .2], **kwargs):
"""Generate an .npz file containing arrays for training machine learning algorithms
Parameters
------------
dest_folder: str
Folder to save labels, tiles, and final numpy arrays into
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
imagery: str
Imagery template to download satellite images from.
Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
seed: int
Random generator seed. Optional, use to make results reproducible.
split_vals: lst
        Percentage of data to put in each category listed in split_names. Must be floats and must sum to one.
split_names: lst
List of names for each subset of the data, either ['train', 'test'] or ['train', 'test', 'val']
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
# if a seed is given, use it
if seed:
np.random.seed(seed)
assert len(split_names) == 2 or len(split_names) == 3.
assert len(split_names) == len(split_vals), "split_names and split_vals must be the same length."
assert np.isclose(sum(split_vals), 1), "split_vals must sum to one."
# open labels file, create tile array
labels_file = op.join(dest_folder, 'labels.npz')
labels = np.load(labels_file)
tile_names = [tile for tile in labels.files]
tile_names.sort()
tiles = np.array(tile_names)
np.random.shuffle(tiles)
# find maximum number of features in advance so numpy shapes match
if ml_type == 'object-detection':
max_features = 0
for tile in labels.files:
features = len(labels[tile])
if features > max_features:
max_features = features
x_vals = []
y_vals = []
# open the images and load those plus the labels into the final arrays
o = urlparse(imagery)
_, image_format = op.splitext(o.path)
if is_tif(imagery): # if a TIF is provided, use jpg as tile format
image_format = '.jpg'
for tile in tiles:
image_file = op.join(dest_folder, 'tiles', '{}{}'.format(tile, image_format))
try:
img = Image.open(image_file)
except FileNotFoundError:
# we often don't download images for each label (e.g. background tiles)
continue
except OSError:
print('Couldn\'t open {}, skipping'.format(image_file))
continue
np_image = np.array(img)
img.close()
x_vals.append(np_image)
if ml_type == 'classification':
y_vals.append(labels[tile])
elif ml_type == 'object-detection':
# zero pad object-detection arrays
cl = labels[tile]
y_vals.append(np.concatenate((cl, np.zeros((max_features - len(cl), 5)))))
elif ml_type == 'segmentation':
y_vals.append(labels[tile][..., np.newaxis]) # Add grayscale channel
# convert lists to numpy arrays
x_vals = np.array(x_vals, dtype=np.uint8)
y_vals = np.array(y_vals, dtype=np.uint8)
x_vals_split_lst = np.split(x_vals,
[int(split_vals[0] * len(x_vals)), int((split_vals[0] + split_vals[1]) * len(x_vals))])
if len(x_vals_split_lst[-1]) == 0:
x_vals_split_lst = x_vals_split_lst[:-1]
y_vals_split_lst = np.split(y_vals,
[int(split_vals[0] * len(y_vals)), int((split_vals[0] + split_vals[1]) * len(y_vals))])
if len(y_vals_split_lst[-1]) == 0:
y_vals_split_lst = y_vals_split_lst[:-1]
print('Saving packaged file to {}'.format(op.join(dest_folder, 'data.npz')))
if len(split_vals) == 2:
np.savez(op.join(dest_folder, 'data.npz'),
x_train=x_vals_split_lst[0],
y_train=y_vals_split_lst[0],
x_test=x_vals_split_lst[1],
y_test=y_vals_split_lst[1])
if len(split_vals) == 3:
np.savez(op.join(dest_folder, 'data.npz'),
x_train=x_vals_split_lst[0],
y_train=y_vals_split_lst[0],
x_test=x_vals_split_lst[1],
y_test=y_vals_split_lst[1],
x_val=x_vals_split_lst[2],
y_val=y_vals_split_lst[2])
|
def package_directory(dest_folder, classes, imagery, ml_type, seed=False, split_names=['train', 'test'],
split_vals=[0.8, .2], **kwargs):
"""Generate an .npz file containing arrays for training machine learning algorithms
Parameters
------------
dest_folder: str
Folder to save labels, tiles, and final numpy arrays into
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
imagery: str
Imagery template to download satellite images from.
Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
seed: int
Random generator seed. Optional, use to make results reproducible.
split_vals: lst
        Percentage of data to put in each category listed in split_names. Must be floats and must sum to one.
split_names: lst
List of names for each subset of the data. Length must match that of `split_vals`. Default ['train', 'test'].
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
# if a seed is given, use it
if seed:
np.random.seed(seed)
assert len(split_names) == 2 or len(split_names) == 3.
assert len(split_names) == len(split_vals), "split_names and split_vals must be the same length."
assert np.isclose(sum(split_vals), 1), "split_vals must sum to one."
# open labels file, create tile array
labels_file = op.join(dest_folder, 'labels.npz')
labels = np.load(labels_file)
tile_names = [tile for tile in labels.files]
tile_names.sort()
tiles = np.array(tile_names)
np.random.shuffle(tiles)
# find maximum number of features in advance so numpy shapes match
if ml_type == 'object-detection':
max_features = 0
for tile in labels.files:
features = len(labels[tile])
if features > max_features:
max_features = features
x_vals = []
y_vals = []
# open the images and load those plus the labels into the final arrays
o = urlparse(imagery)
_, image_format = op.splitext(o.path)
if is_tif(imagery): # if a TIF is provided, use jpg as tile format
image_format = '.jpg'
for tile in tiles:
image_file = op.join(dest_folder, 'tiles', '{}{}'.format(tile, image_format))
try:
img = Image.open(image_file)
except FileNotFoundError:
# we often don't download images for each label (e.g. background tiles)
continue
except OSError:
print('Couldn\'t open {}, skipping'.format(image_file))
continue
np_image = np.array(img)
img.close()
x_vals.append(np_image)
if ml_type == 'classification':
y_vals.append(labels[tile])
elif ml_type == 'object-detection':
# zero pad object-detection arrays
cl = labels[tile]
y_vals.append(np.concatenate((cl, np.zeros((max_features - len(cl), 5)))))
elif ml_type == 'segmentation':
y_vals.append(labels[tile][..., np.newaxis]) # Add grayscale channel
# convert lists to numpy arrays
x_vals = np.array(x_vals, dtype=np.uint8)
y_vals = np.array(y_vals, dtype=np.uint8)
x_vals_split_lst = np.split(x_vals,
[int(split_vals[0] * len(x_vals)), int((split_vals[0] + split_vals[1]) * len(x_vals))])
if len(x_vals_split_lst[-1]) == 0:
x_vals_split_lst = x_vals_split_lst[:-1]
y_vals_split_lst = np.split(y_vals,
[int(split_vals[0] * len(y_vals)), int((split_vals[0] + split_vals[1]) * len(y_vals))])
if len(y_vals_split_lst[-1]) == 0:
y_vals_split_lst = y_vals_split_lst[:-1]
print('Saving packaged file to {}'.format(op.join(dest_folder, 'data.npz')))
if len(split_vals) == 2:
np.savez(op.join(dest_folder, 'data.npz'),
x_train=x_vals_split_lst[0],
y_train=y_vals_split_lst[0],
x_test=x_vals_split_lst[1],
y_test=y_vals_split_lst[1])
if len(split_vals) == 3:
np.savez(op.join(dest_folder, 'data.npz'),
x_train=x_vals_split_lst[0],
y_train=y_vals_split_lst[0],
x_test=x_vals_split_lst[1],
y_test=y_vals_split_lst[1],
x_val=x_vals_split_lst[2],
y_val=y_vals_split_lst[2])
|
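A minimal sketch of the split arithmetic used in both versions above, with a toy array standing in for the packaged tiles; the tiling, labelling, and saving steps are omitted.
import numpy as np
data = np.arange(10)        # stand-in for x_vals
split_vals = [0.8, 0.2]     # must sum to one
# Same cumulative-index computation that feeds np.split above.
cuts = [int(sum(split_vals[:i + 1]) * len(data)) for i in range(len(split_vals))]
parts = np.split(data, cuts)
if len(parts[-1]) == 0:     # np.split leaves an empty trailing chunk; drop it
    parts = parts[:-1]
train, test = parts
print(train, test)          # [0 1 2 3 4 5 6 7] [8 9]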
48,046 |
def main():
args = parse_args()
DEMOS = scopes[args.scope]
suppressed_devices = parse_supported_device_list(args.supported_devices)
omz_dir = (Path(__file__).parent / '../..').resolve()
demos_dir = omz_dir / 'demos'
auto_tools_dir = omz_dir / 'tools/model_tools'
model_info_list = json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'), '--all'],
universal_newlines=True))
model_info = {}
for model_data in model_info_list:
models_list = model_data['model_stages'] if model_data['model_stages'] else [model_data]
for model in models_list:
model_info[model['name']] = model
if args.demos is not None:
names_of_demos_to_test = set(args.demos.split(','))
if all(impl in Demo.IMPLEMENTATION_TYPES for impl in names_of_demos_to_test):
names_of_demos_to_test = {demo.subdirectory for demo in DEMOS if demo.implementation in names_of_demos_to_test}
demos_to_test = [demo for demo in DEMOS if demo.subdirectory in names_of_demos_to_test]
else:
demos_to_test = DEMOS
if len(demos_to_test) == 0:
if args.demos:
print("List of demos to test is empty.")
print(f"Command line argument '--demos {args.demos}' was passed, check that you've specified correct value from the list below:")
print(*(list(Demo.IMPLEMENTATION_TYPES) + [demo.subdirectory for demo in DEMOS]), sep=',')
raise RuntimeError("Not found demos to test!")
print(f"{len(demos_to_test)} demos will be tested:")
print(*[demo.subdirectory for demo in demos_to_test], sep =',')
with temp_dir_as_path() as global_temp_dir:
if args.models_dir:
dl_dir = args.models_dir
print(f"\nRunning on pre-converted IRs: {str(dl_dir)}\n")
else:
dl_dir = prepare_models(auto_tools_dir, args.downloader_cache_dir, args.mo, global_temp_dir, demos_to_test, args.precisions)
num_failures = 0
try:
pythonpath = f"{os.environ['PYTHONPATH']}{os.pathsep}"
except KeyError:
pythonpath = ''
demo_environment = {**os.environ,
'PYTHONIOENCODING': 'utf-8',
'PYTHONPATH': f"{pythonpath}{args.demo_build_dir}",
}
print('Demo Environment: {}'.format(demo_environment))
for demo in demos_to_test:
print('Testing {}...'.format(demo.subdirectory))
print()
demo.set_precisions(args.precisions, model_info)
declared_model_names = set()
for model_data in json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'),
'--list', str(demo.models_lst_path(demos_dir))],
universal_newlines=True)):
models_list = model_data['model_stages'] if model_data['model_stages'] else [model_data]
for model in models_list:
declared_model_names.add(model['name'])
with temp_dir_as_path() as temp_dir:
arg_context = ArgContext(
dl_dir=dl_dir,
data_sequence_dir=temp_dir / 'data_seq',
data_sequences=DATA_SEQUENCES,
model_info=model_info,
test_data_dir=args.test_data_dir,
)
def resolve_arg(arg):
if isinstance(arg, str): return arg
return arg.resolve(arg_context)
def option_to_args(key, value):
if value is None: return [key]
if isinstance(value, list): return [key, *map(resolve_arg, value)]
return [key, resolve_arg(value)]
fixed_args = demo.fixed_args(demos_dir, args.demo_build_dir)
print('Fixed arguments:', ' '.join(map(shlex.quote, fixed_args)))
print()
device_args = demo.device_args(args.devices.split())
for test_case_index, test_case in enumerate(demo.test_cases):
test_case_models = get_models(test_case, demo.model_keys)
case_args = [demo_arg
for key, value in sorted(test_case.options.items())
for demo_arg in option_to_args(key, value)]
case_model_names = {arg.name for arg in list(test_case.options.values()) + test_case.extra_models if isinstance(arg, ModelArg)}
undeclared_case_model_names = case_model_names - declared_model_names
if undeclared_case_model_names:
print("Test case #{}: models not listed in demo's models.lst: {}".format(
test_case_index, ' '.join(sorted(undeclared_case_model_names))))
print()
num_failures += 1
continue
for device, dev_arg in device_args.items():
skip = False
for model in test_case_models:
if suppressed_devices and device in suppressed_devices.get(model, []):
print('Test case #{}/{}: Model {} is suppressed on device'
.format(test_case_index, device, model))
print(flush=True)
skip = True
if skip: continue
print('Test case #{}/{}:'.format(test_case_index, device),
' '.join(shlex.quote(str(arg)) for arg in dev_arg + case_args))
print(flush=True)
try:
start_time = timeit.default_timer()
output = subprocess.check_output(fixed_args + dev_arg + case_args,
stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8',
env=demo_environment)
execution_time = timeit.default_timer() - start_time
demo.parse_output(output, test_case, device)
except subprocess.CalledProcessError as e:
print(e.output)
print('Exit code:', e.returncode)
num_failures += 1
execution_time = -1
if args.report_file:
collect_result(demo.subdirectory, device, case_model_names, execution_time, args.report_file)
print()
print("Failures: {}".format(num_failures))
sys.exit(0 if num_failures == 0 else 1)
|
def main():
args = parse_args()
DEMOS = scopes[args.scope]
suppressed_devices = parse_supported_device_list(args.supported_devices)
omz_dir = (Path(__file__).parent / '../..').resolve()
demos_dir = omz_dir / 'demos'
auto_tools_dir = omz_dir / 'tools/model_tools'
model_info_list = json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'), '--all'],
universal_newlines=True))
model_info = {}
for model_data in model_info_list:
models_list = model_data['model_stages'] if model_data['model_stages'] else [model_data]
for model in models_list:
model_info[model['name']] = model
if args.demos is not None:
names_of_demos_to_test = set(args.demos.split(','))
if all(impl in Demo.IMPLEMENTATION_TYPES for impl in names_of_demos_to_test):
names_of_demos_to_test = {demo.subdirectory for demo in DEMOS if demo.implementation in names_of_demos_to_test}
demos_to_test = [demo for demo in DEMOS if demo.subdirectory in names_of_demos_to_test]
else:
demos_to_test = DEMOS
if len(demos_to_test) == 0:
if args.demos:
print("List of demos to test is empty.")
print(f"Command line argument '--demos {args.demos}' was passed, check that you've specified correct value from the list below:")
print(*(list(Demo.IMPLEMENTATION_TYPES) + [demo.subdirectory for demo in DEMOS]), sep=',')
raise RuntimeError("Not found demos to test!")
print(f"{len(demos_to_test)} demos will be tested:")
print(*[demo.subdirectory for demo in demos_to_test], sep =',')
with temp_dir_as_path() as global_temp_dir:
if args.models_dir:
dl_dir = args.models_dir
print(f"\nRunning on pre-converted IRs: {str(dl_dir)}\n")
else:
dl_dir = prepare_models(auto_tools_dir, args.downloader_cache_dir, args.mo, global_temp_dir, demos_to_test, args.precisions)
num_failures = 0
try:
pythonpath = f"{os.environ['PYTHONPATH']}{os.pathsep}"
except KeyError:
pythonpath = ''
demo_environment = {**os.environ,
'PYTHONIOENCODING': 'utf-8',
'PYTHONPATH': f"{pythonpath}{args.demo_build_dir}{os.pathsep}{os.path.join(args.demo_build_dir, 'ctcdecode_numpy')}",
}
print('Demo Environment: {}'.format(demo_environment))
for demo in demos_to_test:
print('Testing {}...'.format(demo.subdirectory))
print()
demo.set_precisions(args.precisions, model_info)
declared_model_names = set()
for model_data in json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'),
'--list', str(demo.models_lst_path(demos_dir))],
universal_newlines=True)):
models_list = model_data['model_stages'] if model_data['model_stages'] else [model_data]
for model in models_list:
declared_model_names.add(model['name'])
with temp_dir_as_path() as temp_dir:
arg_context = ArgContext(
dl_dir=dl_dir,
data_sequence_dir=temp_dir / 'data_seq',
data_sequences=DATA_SEQUENCES,
model_info=model_info,
test_data_dir=args.test_data_dir,
)
def resolve_arg(arg):
if isinstance(arg, str): return arg
return arg.resolve(arg_context)
def option_to_args(key, value):
if value is None: return [key]
if isinstance(value, list): return [key, *map(resolve_arg, value)]
return [key, resolve_arg(value)]
fixed_args = demo.fixed_args(demos_dir, args.demo_build_dir)
print('Fixed arguments:', ' '.join(map(shlex.quote, fixed_args)))
print()
device_args = demo.device_args(args.devices.split())
for test_case_index, test_case in enumerate(demo.test_cases):
test_case_models = get_models(test_case, demo.model_keys)
case_args = [demo_arg
for key, value in sorted(test_case.options.items())
for demo_arg in option_to_args(key, value)]
case_model_names = {arg.name for arg in list(test_case.options.values()) + test_case.extra_models if isinstance(arg, ModelArg)}
undeclared_case_model_names = case_model_names - declared_model_names
if undeclared_case_model_names:
print("Test case #{}: models not listed in demo's models.lst: {}".format(
test_case_index, ' '.join(sorted(undeclared_case_model_names))))
print()
num_failures += 1
continue
for device, dev_arg in device_args.items():
skip = False
for model in test_case_models:
if suppressed_devices and device in suppressed_devices.get(model, []):
print('Test case #{}/{}: Model {} is suppressed on device'
.format(test_case_index, device, model))
print(flush=True)
skip = True
if skip: continue
print('Test case #{}/{}:'.format(test_case_index, device),
' '.join(shlex.quote(str(arg)) for arg in dev_arg + case_args))
print(flush=True)
try:
start_time = timeit.default_timer()
output = subprocess.check_output(fixed_args + dev_arg + case_args,
stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8',
env=demo_environment)
execution_time = timeit.default_timer() - start_time
demo.parse_output(output, test_case, device)
except subprocess.CalledProcessError as e:
print(e.output)
print('Exit code:', e.returncode)
num_failures += 1
execution_time = -1
if args.report_file:
collect_result(demo.subdirectory, device, case_model_names, execution_time, args.report_file)
print()
print("Failures: {}".format(num_failures))
sys.exit(0 if num_failures == 0 else 1)
|
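The main behavioural difference between the two versions is the extra entry appended to PYTHONPATH. A self-contained sketch of that pattern, with a hypothetical build directory, is:
import os
demo_build_dir = "/tmp/demo_build"   # hypothetical path, for illustration only
# Keep any inherited PYTHONPATH, then append the build dir and a subdirectory,
# joined with the platform's path separator (':' on POSIX, ';' on Windows).
try:
    pythonpath = f"{os.environ['PYTHONPATH']}{os.pathsep}"
except KeyError:
    pythonpath = ""
env = {
    **os.environ,
    "PYTHONIOENCODING": "utf-8",
    "PYTHONPATH": f"{pythonpath}{demo_build_dir}{os.pathsep}"
                  f"{os.path.join(demo_build_dir, 'ctcdecode_numpy')}",
}
print(env["PYTHONPATH"])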
58,170 |
def fetch_indicators(client: Client, tlp_color: Optional[str] = None, feed_tags: List = [], limit: int = -1,) -> List[Dict]:
"""Retrieves indicators from the feed
Args:
client (Client): Client object with request
tlp_color (str): Traffic Light Protocol color
feed_tags (list): tags to assign fetched indicators
limit (int): limit the results
Returns:
Indicators.
"""
iterator = client.build_iterator()
indicators = []
if limit > 0:
iterator = iterator[:limit]
# extract values from iterator
for item in iterator:
value_ = item.get('value')
type_ = item.get('type')
raw_data = {
'value': value_,
'type': type_,
}
# Create indicator object for each value.
# The object consists of a dictionary with required and optional keys and values, as described blow.
for key, value in item.items():
raw_data.update({key: value})
indicator_obj = {
# The indicator value.
'value': value_,
# The indicator type as defined in Cortex XSOAR.
# One can use the FeedIndicatorType class under CommonServerPython to populate this field.
'type': type_,
# The name of the service supplying this feed.
'service': 'Snort IP Blocklist',
# A dictionary that maps values to existing indicator fields defined in Cortex XSOAR.
# One can use this section in order to map custom indicator fields previously defined
# in Cortex XSOAR to their values.
'fields': {},
# A dictionary of the raw data returned from the feed source about the indicator.
'rawJSON': raw_data
}
if feed_tags:
indicator_obj['fields']['tags'] = feed_tags
if tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator_obj)
return indicators
|
def fetch_indicators(client: Client, tlp_color: Optional[str] = None, feed_tags: List = [], limit: int = -1,) -> List[Dict]:
"""Retrieves indicators from the feed
Args:
client (Client): Client object with request
tlp_color (str): Traffic Light Protocol color
feed_tags (list): tags to assign fetched indicators
limit (int): limit the results
Returns:
Indicators.
"""
iterator = client.build_iterator()
indicators = []
if limit > 0:
iterator = iterator[:limit]
# extract values from iterator
for item in iterator:
value_ = item.get('value')
type_ = item.get('type')
raw_data = {
'value': value_,
'type': type_,
}
# Create an indicator object for each value.
# The object consists of a dictionary with required and optional keys and values, as described below.
for key, value in item.items():
raw_data.update({key: value})
indicator_obj = {
# The indicator value.
'value': value_,
# The indicator type as defined in Cortex XSOAR.
# One can use the FeedIndicatorType class under CommonServerPython to populate this field.
'type': type_,
# The name of the service supplying this feed.
'service': 'Snort IP Blocklist',
# A dictionary that maps values to existing indicator fields defined in Cortex XSOAR.
# One can use this section in order to map custom indicator fields previously defined
# in Cortex XSOAR to their values.
'fields': {},
# A dictionary of the raw data returned from the feed source about the indicator.
'rawJSON': raw_data
}
if feed_tags:
indicator_obj['fields']['tags'] = feed_tags
if tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator_obj)
return indicators
|
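To show the shape of the objects this function emits, here is a minimal sketch with a hard-coded feed item instead of a real `Client`; the IP value comes from a documentation range and the tags are invented.
dummy_feed = [{"value": "198.51.100.7", "type": "IP"}]
indicators = []
for item in dummy_feed:
    indicators.append({
        "value": item["value"],
        "type": item["type"],
        "service": "Snort IP Blocklist",
        "fields": {"tags": ["example-tag"], "trafficlightprotocol": "GREEN"},
        "rawJSON": dict(item),
    })
print(indicators[0]["value"], indicators[0]["fields"])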
15,594 |
def configure_mydevolo(conf: dict) -> Mydevolo:
"""Configure mydevolo."""
mydevolo = Mydevolo()
mydevolo.user = conf[CONF_USERNAME]
mydevolo.password = conf[CONF_PASSWORD]
try:
mydevolo.url = conf[CONF_MYDEVOLO]
except KeyError:
mydevolo.url = DEFAULT_MYDEVOLO
return mydevolo
|
def configure_mydevolo(conf: dict) -> Mydevolo:
"""Configure mydevolo."""
mydevolo = Mydevolo()
mydevolo.user = conf[CONF_USERNAME]
mydevolo.password = conf[CONF_PASSWORD]
mydevolo.url = conf.get(CONF_MYDEVOLO, DEFAULT_MYDEVOLO)
return mydevolo
|
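The change in this pair swaps a try/except KeyError for `dict.get` with a default. The two spellings are equivalent, as a plain-dictionary check shows (the default URL below is assumed for illustration).
DEFAULT_URL = "https://www.mydevolo.com"   # assumed default, for illustration
conf = {"username": "user@example.com", "password": "secret"}
try:
    url = conf["mydevolo_url"]
except KeyError:
    url = DEFAULT_URL
url_short = conf.get("mydevolo_url", DEFAULT_URL)
assert url == url_short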
29,586 |
def error_message(scheduler, workers, client, client_name="client"):
from .utils import asciitable
MISSING, UNKNOWN = "MISSING", "UNKNOWN"
scheduler_name = "scheduler"
nodes = {**{client_name: client}, **{scheduler_name: scheduler}, **workers}
# Hold all versions, e.g. versions["scheduler"]["distributed"] = 2.9.3
node_packages = defaultdict(dict)
# Collect all package versions
packages = set()
for node, info in nodes.items():
if info is None or not (isinstance(info, dict)) or "packages" not in info:
node_packages[node] = defaultdict(lambda: UNKNOWN)
else:
node_packages[node] = defaultdict(lambda: MISSING)
for pkg, version in info["packages"].items():
node_packages[node][pkg] = version
packages.add(pkg)
errs = []
for pkg in sorted(packages):
versions = set(node_packages[node][pkg] for node in nodes)
if len(versions) <= 1:
continue
rows = [
(node_name, node_packages[node_name][pkg]) for node_name in nodes.keys()
]
errs.append("%s\n%s" % (pkg, asciitable(["", "version"], rows)))
if errs:
return "Mismatched versions found\n" "\n" "%s" % ("\n\n".join(errs))
else:
return ""
|
def error_message(scheduler, workers, client, client_name="client"):
from .utils import asciitable
MISSING, UNKNOWN = "MISSING", "UNKNOWN"
scheduler_name = "scheduler"
nodes = {client_name: client, "scheduler": scheduler, **workers}
# Hold all versions, e.g. versions["scheduler"]["distributed"] = 2.9.3
node_packages = defaultdict(dict)
# Collect all package versions
packages = set()
for node, info in nodes.items():
if info is None or not (isinstance(info, dict)) or "packages" not in info:
node_packages[node] = defaultdict(lambda: UNKNOWN)
else:
node_packages[node] = defaultdict(lambda: MISSING)
for pkg, version in info["packages"].items():
node_packages[node][pkg] = version
packages.add(pkg)
errs = []
for pkg in sorted(packages):
versions = set(node_packages[node][pkg] for node in nodes)
if len(versions) <= 1:
continue
rows = [
(node_name, node_packages[node_name][pkg]) for node_name in nodes.keys()
]
errs.append("%s\n%s" % (pkg, asciitable(["", "version"], rows)))
if errs:
return "Mismatched versions found\n" "\n" "%s" % ("\n\n".join(errs))
else:
return ""
|
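The only change above is building the `nodes` mapping as one literal instead of merging three dictionaries; both forms produce the same mapping, as this toy check confirms.
client = {"packages": {"distributed": "2.9.3"}}
scheduler = {"packages": {"distributed": "2.9.3"}}
workers = {"worker-0": {"packages": {"distributed": "2.9.1"}}}
merged = {**{"client": client}, **{"scheduler": scheduler}, **workers}
literal = {"client": client, "scheduler": scheduler, **workers}
assert merged == literal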
9,563 |
def main():
argument_spec = dict(
autoscaling_group_name=dict(type='str', required=True),
scheduled_action_name=dict(type='str', required=True),
start_time=dict(type='str', default=None),
end_time=dict(type='str', default=None),
recurrence=dict(type='str', required=True),
min_size=dict(type='int', default=None),
max_size=dict(type='int', default=None),
desired_capacity=dict(type='int', required=True),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='json and boto3 required for this module')
try:
client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff())
except (ClientError, BotoCoreError) as e:
module.fail_json(msg='{0}'.format(e))
state = module.params.get('state')
if state == 'present':
(changed, results) = put_scheduled_update_group_action(client, module)
else:
(changed, results) = delete_scheduled_action(client, module)
module.exit_json(changed=changed, scheduled_action=results)
|
def main():
argument_spec = dict(
autoscaling_group_name=dict(type='str', required=True),
scheduled_action_name=dict(type='str', required=True),
start_time=dict(type='str', default=None),
end_time=dict(type='str', default=None),
recurrence=dict(type='str', required=True),
min_size=dict(type='int', default=None),
max_size=dict(type='int', default=None),
desired_capacity=dict(type='int', required=True),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='json and boto3 required for this module')
try:
client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff())
except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to get boto3 client')
state = module.params.get('state')
if state == 'present':
(changed, results) = put_scheduled_update_group_action(client, module)
else:
(changed, results) = delete_scheduled_action(client, module)
module.exit_json(changed=changed, scheduled_action=results)
|
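Stripped of the AWS and Ansible plumbing, the present/absent dispatch at the end of `main()` reduces to the control flow below; the two handlers are stubs, not the module's real functions.
def create_action(client, params):
    return True, {"action": "created"}      # stub standing in for put_scheduled_update_group_action
def delete_action(client, params):
    return True, {"action": "deleted"}      # stub standing in for delete_scheduled_action
def run(params, client=None):
    if params.get("state", "present") == "present":
        changed, results = create_action(client, params)
    else:
        changed, results = delete_action(client, params)
    return {"changed": changed, "scheduled_action": results}
print(run({"state": "absent"}))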
37,815 |
def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
target_arch_env.tmp + "/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
            # The installed dependencies are in /tmp/install_deps on the host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
target_arch_env.host_machine_deps_usr_in_container + "/" + dir,
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create is True and before_build is True:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
|
def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
target_arch_env.tmp + "/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
            # The installed dependencies are in /tmp/install_deps on the host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
target_arch_env.host_machine_deps_usr_in_container + "/" + dir,
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create and before_build:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
|
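The modification drops the explicit `is True` comparisons, which for booleans are redundant; a quick check, together with the same '&&' command splitting used at the top of the function:
pip_install_env_create = True
before_build = True
verbose_form = pip_install_env_create is True and before_build is True
idiomatic_form = pip_install_env_create and before_build
assert verbose_form == idiomatic_form
cmd_str = "pip install wheel && python -m build"
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
print(cmds)   # ['pip install wheel', 'python -m build']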
24,796 |
def test_infer_node_3():
"""Return a set containing an nodes.ClassDef object when the attribute
has a type annotation"""
node = astroid.extract_node(
"""
class Component:
pass
class Composite:
def __init__(self, component: Component):
self.component = component
"""
)
instance_attr = node.instance_attrs.get("component")[0]
assert isinstance(infer_node(instance_attr), set)
assert isinstance(infer_node(instance_attr).pop(), nodes.ClassDef)
|
def test_infer_node_3():
"""Return a set containing a nodes.ClassDef object when the attribute
has a type annotation"""
node = astroid.extract_node(
"""
class Component:
pass
class Composite:
def __init__(self, component: Component):
self.component = component
"""
)
instance_attr = node.instance_attrs.get("component")[0]
assert isinstance(infer_node(instance_attr), set)
assert isinstance(infer_node(instance_attr).pop(), nodes.ClassDef)
|
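For context, the attribute lookup this test relies on can be reproduced directly with astroid (the `astroid` package must be installed; `infer_node` itself lives in pylint's pyreverse utilities and is not shown here).
import astroid
node = astroid.extract_node(
    """
    class Component:
        pass
    class Composite:
        def __init__(self, component: Component):
            self.component = component
    """
)
assign = node.instance_attrs["component"][0]
print(assign)   # the AssignAttr node recorded for self.component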
23,149 |
def read_hdf(
pattern,
key,
start=0,
stop=None,
columns=None,
chunksize=1000000,
sorted_index=False,
lock=True,
mode="r",
):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, pathlib.Path, list
File pattern (string), pathlib.Path, buffer to read from, or list of
file paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'r'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith("/") else "/" + key
# Convert path-like objects to a string
pattern = stringify_path(pattern)
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if not isinstance(pattern, str) and len(paths) == 0:
raise ValueError("No files provided")
if not paths or len(paths) == 0:
raise OSError(f"File(s) not found: {pattern}")
for path in paths:
try:
exists = os.path.exists(path)
except (ValueError, TypeError):
exists = False
if not exists:
raise OSError(f"File not found or insufficient permissions: {path}")
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError(
"When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes"
)
# Build metadata
with pd.HDFStore(paths[0], mode=mode) as hdf:
meta_key = _expand_key(key, hdf)[0]
try:
meta = pd.read_hdf(paths[0], meta_key, mode=mode, stop=0)
except IndexError: # if file is empty, don't set stop
meta = pd.read_hdf(paths[0], meta_key, mode=mode)
if columns is not None:
meta = meta[columns]
# Common kwargs
if meta.ndim == 1:
common_kwargs = {"name": meta.name, "mode": mode}
else:
common_kwargs = {"mode": mode}
# Build parts
parts, divisions = _build_parts(
paths, key, start, stop, chunksize, sorted_index, mode
)
# Construct the output collection with from_map
return from_map(
HDFFunctionWrapper(columns, meta.ndim, lock, common_kwargs),
parts,
meta=meta,
divisions=divisions,
label="read-hdf-",
token=tokenize(paths, key, start, stop, sorted_index, chunksize, mode),
enforce_metadata=False,
)
|
def read_hdf(
pattern,
key,
start=0,
stop=None,
columns=None,
chunksize=1000000,
sorted_index=False,
lock=True,
mode="r",
):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, pathlib.Path, list
File pattern (string), pathlib.Path, buffer to read from, or list of
file paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'r'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith("/") else "/" + key
# Convert path-like objects to a string
pattern = stringify_path(pattern)
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if not isinstance(pattern, str) and len(paths) == 0:
raise ValueError("No files provided")
if not paths or len(paths) == 0:
raise OSError(f"File(s) not found: {pattern}")
for path in paths:
try:
exists = os.path.exists(path)
except (ValueError, TypeError):
exists = False
if not exists:
raise OSError(f"File not found or insufficient permissions: {path}")
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError(
"When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes"
)
# Build metadata
with pd.HDFStore(paths[0], mode=mode) as hdf:
meta_key = _expand_key(key, hdf)[0]
try:
meta = pd.read_hdf(paths[0], meta_key, mode=mode, stop=0)
except IndexError: # if file is empty, don't set stop
meta = pd.read_hdf(paths[0], meta_key, mode=mode)
if columns is not None:
meta = meta[columns]
# Common kwargs
if meta.ndim == 1:
common_kwargs = {"name": meta.name, "mode": mode}
else:
common_kwargs = {"mode": mode}
# Build parts
parts, divisions = _build_parts(
paths, key, start, stop, chunksize, sorted_index, mode
)
# Construct the output collection with from_map
return from_map(
HDFFunctionWrapper(columns, meta.ndim, lock, common_kwargs),
parts,
meta=meta,
divisions=divisions,
label="read-hdf",
token=tokenize(paths, key, start, stop, sorted_index, chunksize, mode),
enforce_metadata=False,
)
|
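The path normalisation at the top of `read_hdf` (a glob pattern string versus an explicit list of paths) can be sketched on its own; the file names below are hypothetical and need not exist for the glob call itself.
from glob import glob
def expand_paths(pattern):
    # Mirror read_hdf: sort glob matches, pass explicit lists through unchanged.
    if isinstance(pattern, str):
        return sorted(glob(pattern))
    if len(pattern) == 0:
        raise ValueError("No files provided")
    return list(pattern)
print(expand_paths(["myfile.2.hdf5", "myfile.1.hdf5"]))   # kept in the given order
print(expand_paths("myfile.*.hdf5"))                      # [] unless matching files exist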
28,560 |
def plot_khat(
hover_label, # pylint: disable=unused-argument
hover_format, # pylint: disable=unused-argument
ax,
figsize,
xdata,
khats,
kwargs,
annotate,
coord_labels,
show_bins,
hlines_kwargs, # pylint: disable=unused-argument
xlabels, # pylint: disable=unused-argument
legend, # pylint: disable=unused-argument
color,
dims,
textsize,
markersize, # pylint: disable=unused-argument
n_data_points,
bin_format,
backend_kwargs,
show,
):
"""Bokeh khat plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
(figsize, *_, line_width, _) = _scale_fig_size(figsize, textsize)
cmap = None
if isinstance(color, str):
if color in dims:
colors, _ = color_from_dim(khats, color)
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(colors)
else:
legend = False
rgba_c = to_rgba_array(np.full(n_data_points, color))
else:
legend = False
try:
rgba_c = to_rgba_array(color)
except ValueError:
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(color)
khats = khats if isinstance(khats, np.ndarray) else khats.values.flatten()
alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)
rgba_c[:, 3] = alphas
rgba_c = vectorized_to_hex(rgba_c)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
if not isinstance(rgba_c, str) and isinstance(rgba_c, Iterable):
for idx, rgba_c_ in enumerate(rgba_c):
ax.cross(xdata[idx], khats[idx], line_color=rgba_c_, fill_color=rgba_c_, size=10)
else:
ax.cross(xdata, khats, line_color=rgba_c, fill_color=rgba_c, size=10)
if annotate:
idxs = xdata[khats > 1]
for idx in idxs:
ax.text(x=[idx], y=[khats[idx]], text=[coord_labels[idx]])
for hline in [0, 0.5, 0.7, 1]:
_hline = Span(
location=hline,
dimension="width",
line_color="grey",
line_width=line_width,
line_dash="dashed",
)
ax.renderers.append(_hline)
ymin = min(khats)
ymax = max(khats)
xmax = len(khats)
if show_bins:
bin_edges = np.array([ymin, 0.5, 0.7, 1, ymax])
bin_edges = bin_edges[(bin_edges >= ymin) & (bin_edges <= ymax)]
hist, _, _ = histogram(khats, bin_edges)
for idx, count in enumerate(hist):
ax.text(
x=[(n_data_points - 1 + xmax) / 2],
y=[np.mean(bin_edges[idx : idx + 2])],
text=[bin_format.format(count, count / n_data_points * 100)],
)
ax.x_range._property_values["end"] = xmax + 1 # pylint: disable=protected-access
ax.xaxis.axis_label = "Data Point"
ax.yaxis.axis_label = "Shape parameter k"
if ymin > 0:
ax.y_range._property_values["start"] = -0.02 # pylint: disable=protected-access
if ymax < 1:
ax.y_range._property_values["end"] = 1.02 # pylint: disable=protected-access
    elif ymax > 1 and annotate:
ax.y_range._property_values["end"] = 1.1 * ymax # pylint: disable=protected-access
show_layout(ax, show)
return ax
|
def plot_khat(
hover_label, # pylint: disable=unused-argument
hover_format, # pylint: disable=unused-argument
ax,
figsize,
xdata,
khats,
kwargs,
annotate,
coord_labels,
show_bins,
hlines_kwargs, # pylint: disable=unused-argument
xlabels, # pylint: disable=unused-argument
legend, # pylint: disable=unused-argument
color,
dims,
textsize,
markersize, # pylint: disable=unused-argument
n_data_points,
bin_format,
backend_kwargs,
show,
):
"""Bokeh khat plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
(figsize, *_, line_width, _) = _scale_fig_size(figsize, textsize)
cmap = None
if isinstance(color, str):
if color in dims:
colors, _ = color_from_dim(khats, color)
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(colors)
else:
legend = False
rgba_c = to_rgba_array(np.full(n_data_points, color))
else:
legend = False
try:
rgba_c = to_rgba_array(color)
except ValueError:
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(color)
khats = khats if isinstance(khats, np.ndarray) else khats.values.flatten()
alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)
rgba_c[:, 3] = alphas
rgba_c = vectorized_to_hex(rgba_c, keep_alpha=True)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
if not isinstance(rgba_c, str) and isinstance(rgba_c, Iterable):
for idx, rgba_c_ in enumerate(rgba_c):
ax.cross(xdata[idx], khats[idx], line_color=rgba_c_, fill_color=rgba_c_, size=10)
else:
ax.cross(xdata, khats, line_color=rgba_c, fill_color=rgba_c, size=10)
if annotate:
idxs = xdata[khats > 1]
for idx in idxs:
ax.text(x=[idx], y=[khats[idx]], text=[coord_labels[idx]])
for hline in [0, 0.5, 0.7, 1]:
_hline = Span(
location=hline,
dimension="width",
line_color="grey",
line_width=line_width,
line_dash="dashed",
)
ax.renderers.append(_hline)
ymin = min(khats)
ymax = max(khats)
xmax = len(khats)
if show_bins:
bin_edges = np.array([ymin, 0.5, 0.7, 1, ymax])
bin_edges = bin_edges[(bin_edges >= ymin) & (bin_edges <= ymax)]
hist, _, _ = histogram(khats, bin_edges)
for idx, count in enumerate(hist):
ax.text(
x=[(n_data_points - 1 + xmax) / 2],
y=[np.mean(bin_edges[idx : idx + 2])],
text=[bin_format.format(count, count / n_data_points * 100)],
)
ax.x_range._property_values["end"] = xmax + 1 # pylint: disable=protected-access
ax.xaxis.axis_label = "Data Point"
ax.yaxis.axis_label = "Shape parameter k"
if ymin > 0:
ax.y_range._property_values["start"] = -0.02 # pylint: disable=protected-access
if ymax < 1:
ax.y_range._property_values["end"] = 1.02 # pylint: disable=protected-access
    elif ymax > 1 and annotate:
ax.y_range._property_values["end"] = 1.1 * ymax # pylint: disable=protected-access
show_layout(ax, show)
return ax
|
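The marker transparencies come from boolean arithmetic over the khat array (0.5 for k <= 0.5, 0.7 for 0.5 < k <= 1, and 1.0 for k > 1); a standalone check with invented values:
import numpy as np
khats = np.array([0.2, 0.6, 1.3])
alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)
print(alphas)   # [0.5 0.7 1. ]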
49,204 |
def setup_databases(verbosity, interactive, *, time_keeper=None, keepdb=False, debug_sql=False, parallel=0,
aliases=None, serialize_aliases=None):
"""Create the test databases."""
if time_keeper is None:
time_keeper = NullTimeKeeper()
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
with time_keeper.timed(" Creating '%s'" % alias):
try:
serialize_alias = connection.settings_dict['TEST']['SERIALIZE']
except KeyError:
serialize_alias = serialize_aliases is None or alias in serialize_aliases
else:
warnings.warn(
'The `SERIALIZE` test database setting is deprecated '
'as it can be inferred from the `databases` of the discovered '
'`TransactionTestCase` that enable the `serialized_rollback` '
'feature. It will be completely ignored from Django 5.0.',
category=RemovedInDjango50Warning
)
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=serialize_alias,
)
if parallel > 1:
for index in range(parallel):
with time_keeper.timed(" Cloning '%s'" % alias):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
|
def setup_databases(verbosity, interactive, *, time_keeper=None, keepdb=False, debug_sql=False, parallel=0,
aliases=None, serialize_aliases=None):
"""Create the test databases."""
if time_keeper is None:
time_keeper = NullTimeKeeper()
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
with time_keeper.timed(" Creating '%s'" % alias):
try:
serialize_alias = connection.settings_dict['TEST']['SERIALIZE']
except KeyError:
serialize_alias = serialize_aliases is None or alias in serialize_aliases
else:
warnings.warn(
'The SERIALIZE test database setting is '
'deprecated as it can be inferred from the '
'TransactionTestCase.databases that enable the '
'serialized_rollback feature.',
category=RemovedInDjango50Warning
)
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=serialize_alias,
)
if parallel > 1:
for index in range(parallel):
with time_keeper.timed(" Cloning '%s'" % alias):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
|
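The try/except/else construct around SERIALIZE warns only when the key is actually present, and otherwise falls back to a computed default. A compact illustration with a toy settings dict:
import warnings
settings = {"TEST": {"SERIALIZE": False}}   # toy settings dict
try:
    serialize = settings["TEST"]["SERIALIZE"]
except KeyError:
    serialize = True                        # computed default would go here
else:
    warnings.warn("The SERIALIZE test database setting is deprecated.",
                  DeprecationWarning)
print(serialize)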
14,054 |
def _read_feather(path, columns=None, **kwargs):
"""
Load a Feather object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_feather` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow' >= 0.17.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
**kwargs
Any additional kwargs passed to pyarrow.feather.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_feather("data.feather) # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_feather(
    ...     "data.feather",
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
feather = import_optional_dependency(
"pyarrow.feather", extra="pyarrow is required for Feather support."
)
# TODO move this into `import_optional_dependency`
import pyarrow
if pyarrow.__version__ < LooseVersion("0.17.0"):
raise ImportError("pyarrow >= 0.17 required for Feather support")
table = feather.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
|
def _read_feather(path, columns=None, **kwargs):
"""
Load a Feather object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_feather` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow' >= 0.17.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
**kwargs
Any additional kwargs passed to pyarrow.feather.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_feather("data.feather") # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_feather(
    ...     "data.feather",
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
feather = import_optional_dependency(
"pyarrow.feather", extra="pyarrow is required for Feather support."
)
# TODO move this into `import_optional_dependency`
import pyarrow
if pyarrow.__version__ < LooseVersion("0.17.0"):
raise ImportError("pyarrow >= 0.17 required for Feather support")
table = feather.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
|
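A minimal sketch of the version gate used above; it keeps the same LooseVersion comparison, so it only runs on Pythons that still ship distutils, and the version strings are illustrative.
from distutils.version import LooseVersion
def require_min_pyarrow(installed, minimum="0.17.0"):
    # Raise the same ImportError the reader raises when pyarrow is too old.
    if LooseVersion(installed) < LooseVersion(minimum):
        raise ImportError(f"pyarrow >= {minimum} required for Feather support")
    return True
print(require_min_pyarrow("1.0.1"))   # True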
9,394 |
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
password_lock=dict(type='bool'),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
),
supports_check_mode=True
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
result['diff'] = {
'before': 'user exists\n',
'after': 'user removed\n',
}
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
result['diff'] = {
'before': 'user does not exist\n',
'after': 'user created\n',
}
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
result['diff'] = {
'before':
''.join([
'%s = %s\n' % (key, oldv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
'after':
''.join([
'%s = %s\n' % (key, newv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
}
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
result['diff']['after'] += 'created %s\n' % user.home
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
result['diff']['after'] += 'generated SSH key'
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
|
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
password_lock=dict(type='bool'),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
),
supports_check_mode=True
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
result['diff'] = {
'before': '{0} exists\n'.format(user.name),
'after': 'user removed\n',
}
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
result['diff'] = {
'before': 'user does not exist\n',
'after': 'user created\n',
}
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
result['diff'] = {
'before':
''.join([
'%s = %s\n' % (key, oldv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
'after':
''.join([
'%s = %s\n' % (key, newv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
}
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
result['diff']['after'] += 'created %s\n' % user.home
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
result['diff']['after'] += 'generated SSH key'
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
|
53,922 |
def parallel_run(
target, args, kwargs, nodes_list, timeout=None, concurrent_tasks=20
):
"""Run target function on nodes in parallel
Args:
target (function): The target function to be executed in parallel.
args (list of tuple): List of arguments for the target function.
kwargs (dict): Keyword arguments for the target function. It will be extended with two keys: 'node' and
'results'. The 'node' key will hold an item of the nodes list. The 'results' key will hold an instance of
multiprocessing.Manager().dict(). It is a proxy of the shared dict that will be used by each process for
returning execution results.
nodes (list of nodes): List of nodes to be used by the target function
timeout (int or float, optional): Total time allowed for the spawned multiple processes to run. Defaults to
None. When timeout is specified, this function will wait at most 'timeout' seconds for the processes to
run. When time is up, this function will try to terminate or even kill all the processes.
Raises:
flag: If any of the spawned processes cannot be terminated, fail the test.
Returns:
dict: An instance of multiprocessing.Manager().dict(). It is a proxy to the shared dict that is used by all the
spawned processes.
"""
nodes = [node for node in nodes_list]
# Callback API for wait_procs
def on_terminate(worker):
logger.info("process {} terminated with exit code {}".format(
worker.name, worker.returncode)
)
def force_terminate(workers):
# Some processes cannot be terminated. Try to kill them and raise flag.
running_processes = [worker for worker in workers if worker.is_alive()]
if len(running_processes) > 0:
logger.info(
'Found processes still running: {}. Try to kill them.'.format( #lgtm [py/clear-text-logging-sensitive-data]
str(running_processes)
)
)
for p in running_processes:
results[p.name] = [{'failed': True}]
try:
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
pt_assert(
False,
"""Processes running target "{}" could not be terminated.
Tried killing them. But please check""".format(target.__name__)
)
workers = []
results = Manager().dict()
start_time = datetime.datetime.now()
tasks_done = 0
total_tasks = len(nodes)
tasks_running = 0
total_timeout = timeout * int(len(nodes)/concurrent_tasks) if timeout else None
failed_processes = {}
while tasks_done < total_tasks:
# If execution time of processes exceeds timeout, need to force
# terminate them all.
if total_timeout is not None:
if (datetime.datetime.now() - start_time).seconds > total_timeout:
logger.error('Process execution time exceeds {} seconds.'.format(
str(total_timeout)
))
break
while len(nodes) and tasks_running < concurrent_tasks:
node = nodes.pop(0)
kwargs['node'] = node
kwargs['results'] = results
process_name = "{}--{}".format(target.__name__, node)
worker = SonicProcess(
name=process_name, target=target, args=args,
kwargs=kwargs
)
worker.start()
tasks_running += 1
logger.debug('Started process {} running target "{}"'.format(
worker.pid, process_name
))
workers.append(worker)
gone, alive = wait_procs(workers, timeout=timeout, callback=on_terminate)
logger.debug("task completed {}, running {}".format(
len(gone), len(alive)
))
if len(gone) == 0:
logger.debug("all processes have timedout")
tasks_running -= len(workers)
tasks_done += len(workers)
force_terminate(workers)
else:
tasks_running -= len(gone)
tasks_done += len(gone)
# check if we have any processes that failed - have exitcode non-zero
for worker in gone:
if worker.exitcode != 0:
failed_processes[worker.name] = {}
failed_processes[worker.name]['exit_code'] = worker.exitcode
failed_processes[worker.name]['exception'] = worker.exception
# In case of timeout force terminate spawned processes
for worker in workers:
if worker.is_alive():
logger.error('Process {} is alive, force terminate it.'.format(
worker.name
))
worker.terminate()
results[worker.name] = [{'failed': True}]
end_time = datetime.datetime.now()
delta_time = end_time - start_time
# force terminate any workers still running
force_terminate(workers)
# if we have failed processes, we should log the exception and exit code
# of each Process and fail
if len(failed_processes.keys()):
for process_name, process in failed_processes.items():
if 'exception' in process and process['exception']:
p_exception = process['exception'][0]
p_traceback = process['exception'][1]
p_exitcode = process['exit_code']
logger.error("""Process {} had exit code {} and exception {}
and traceback {}""".format(
process_name, p_exitcode, p_exception, p_traceback
)
)
pt_assert(
False,
'Processes "{}" had failures. Please check the logs'.format(
failed_processes.keys()
)
)
logger.info(
'Completed running processes for target "{}" in {} seconds'.format(
target.__name__, str(delta_time)
)
)
return results
|
def parallel_run(
target, args, kwargs, nodes_list, timeout=None, concurrent_tasks=20
):
"""Run target function on nodes in parallel
Args:
target (function): The target function to be executed in parallel.
args (list of tuple): List of arguments for the target function.
kwargs (dict): Keyword arguments for the target function. It will be extended with two keys: 'node' and
'results'. The 'node' key will hold an item of the nodes list. The 'results' key will hold an instance of
multiprocessing.Manager().dict(). It is a proxy of the shared dict that will be used by each process for
returning execution results.
nodes (list of nodes): List of nodes to be used by the target function
timeout (int or float, optional): Total time allowed for the spawned multiple processes to run. Defaults to
None. When timeout is specified, this function will wait at most 'timeout' seconds for the processes to
run. When time is up, this function will try to terminate or even kill all the processes.
Raises:
flag: If any of the spawned processes cannot be terminated, fail the test.
Returns:
dict: An instance of multiprocessing.Manager().dict(). It is a proxy to the shared dict that is used by all the
spawned processes.
"""
nodes = [node for node in nodes_list]
# Callback API for wait_procs
def on_terminate(worker):
logger.info("process {} terminated with exit code {}".format(
worker.name, worker.returncode)
)
def force_terminate(workers):
# Some processes cannot be terminated. Try to kill them and raise flag.
running_processes = [worker for worker in workers if worker.is_alive()]
if len(running_processes) > 0:
logger.info(
'Found processes still running: {}. Try to kill them.'.format( #lgtm [py/clear-text-logging-sensitive-data]
str(running_processes)
)
)
for p in running_processes:
results[p.name] = [{'failed': True}]
try:
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
pt_assert(
False,
"""Processes running target "{}" could not be terminated.
Tried killing them. But please check""".format(target.__name__)
)
workers = []
results = Manager().dict()
start_time = datetime.datetime.now()
tasks_done = 0
total_tasks = len(nodes)
tasks_running = 0
import math
total_timeout = timeout * math.ceil(len(nodes)/float(concurrent_tasks)) if timeout else None
failed_processes = {}
while tasks_done < total_tasks:
# If execution time of processes exceeds timeout, need to force
# terminate them all.
if total_timeout is not None:
if (datetime.datetime.now() - start_time).seconds > total_timeout:
logger.error('Process execution time exceeds {} seconds.'.format(
str(total_timeout)
))
break
while len(nodes) and tasks_running < concurrent_tasks:
node = nodes.pop(0)
kwargs['node'] = node
kwargs['results'] = results
process_name = "{}--{}".format(target.__name__, node)
worker = SonicProcess(
name=process_name, target=target, args=args,
kwargs=kwargs
)
worker.start()
tasks_running += 1
logger.debug('Started process {} running target "{}"'.format(
worker.pid, process_name
))
workers.append(worker)
gone, alive = wait_procs(workers, timeout=timeout, callback=on_terminate)
logger.debug("task completed {}, running {}".format(
len(gone), len(alive)
))
if len(gone) == 0:
logger.debug("all processes have timedout")
tasks_running -= len(workers)
tasks_done += len(workers)
force_terminate(workers)
else:
tasks_running -= len(gone)
tasks_done += len(gone)
# check if we have any processes that failed - have exitcode non-zero
for worker in gone:
if worker.exitcode != 0:
failed_processes[worker.name] = {}
failed_processes[worker.name]['exit_code'] = worker.exitcode
failed_processes[worker.name]['exception'] = worker.exception
# In case of timeout force terminate spawned processes
for worker in workers:
if worker.is_alive():
logger.error('Process {} is alive, force terminate it.'.format(
worker.name
))
worker.terminate()
results[worker.name] = [{'failed': True}]
end_time = datetime.datetime.now()
delta_time = end_time - start_time
# force terminate any workers still running
force_terminate(workers)
# if we have failed processes, we should log the exception and exit code
# of each Process and fail
if len(failed_processes.keys()):
for process_name, process in failed_processes.items():
if 'exception' in process and process['exception']:
p_exception = process['exception'][0]
p_traceback = process['exception'][1]
p_exitcode = process['exit_code']
logger.error("""Process {} had exit code {} and exception {}
and traceback {}""".format(
process_name, p_exitcode, p_exception, p_traceback
)
)
pt_assert(
False,
'Processes "{}" had failures. Please check the logs'.format(
failed_processes.keys()
)
)
logger.info(
'Completed running processes for target "{}" in {} seconds'.format(
target.__name__, str(delta_time)
)
)
return results
|
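The two parallel_run variants above differ in how the number of batches is rounded when computing the total timeout. A small illustration of that difference (the numbers are arbitrary, not taken from the snippet):
# Illustration of the rounding difference in the total-timeout calculation (arbitrary numbers).
import math

timeout, concurrent_tasks = 60, 20
for num_nodes in (20, 25, 41):
    truncated = timeout * int(num_nodes / concurrent_tasks)                 # first variant
    rounded_up = timeout * math.ceil(num_nodes / float(concurrent_tasks))   # second variant
    print(num_nodes, truncated, rounded_up)
# prints: 20 60 60 / 25 60 120 / 41 120 180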
52,766 |
def render_label(content, label_for=None, label_class=None, label_title='', label_id='', optional=False, is_valid=None, attrs={}):
"""
Render a label with content
"""
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
if label_id:
attrs['id'] = label_id
opt = ""
if is_valid is not None:
if is_valid:
validation_text = pgettext('form', 'is valid')
else:
validation_text = pgettext('form', 'has errors')
opt += '<strong class="sr-only"> {}</strong>'.format(validation_text)
if text_value(content) == ' ':
# Empty label, e.g. checkbox
attrs.setdefault('class', '')
attrs['class'] += ' label-empty'
# usually checkboxes have overall empty labels and special labels per checkbox
# => remove for-attribute as well as "required"-text appended to label
if 'for' in attrs:
del(attrs['for'])
else:
opt += '<i class="sr-only label-required">, {}</i>'.format(pgettext('form', 'required')) if not optional else ''
builder = '<{tag}{attrs}>{content}{opt}</{tag}>'
return format_html(
builder,
tag='label',
attrs=mark_safe(flatatt(attrs)) if attrs else '',
opt=mark_safe(opt),
content=text_value(content),
)
|
def render_label(content, label_for=None, label_class=None, label_title='', label_id='', optional=False, is_valid=None, attrs=None):
"""
Render a label with content
"""
attrs = attrs or {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
if label_id:
attrs['id'] = label_id
opt = ""
if is_valid is not None:
if is_valid:
validation_text = pgettext('form', 'is valid')
else:
validation_text = pgettext('form', 'has errors')
opt += '<strong class="sr-only"> {}</strong>'.format(validation_text)
if text_value(content) == ' ':
# Empty label, e.g. checkbox
attrs.setdefault('class', '')
attrs['class'] += ' label-empty'
# usually checkboxes have overall empty labels and special labels per checkbox
# => remove for-attribute as well as "required"-text appended to label
if 'for' in attrs:
del(attrs['for'])
else:
opt += '<i class="sr-only label-required">, {}</i>'.format(pgettext('form', 'required')) if not optional else ''
builder = '<{tag}{attrs}>{content}{opt}</{tag}>'
return format_html(
builder,
tag='label',
attrs=mark_safe(flatatt(attrs)) if attrs else '',
opt=mark_safe(opt),
content=text_value(content),
)
|
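The change in this render_label pair is the default value of attrs. The pitfall the second variant avoids can be reproduced with a toy function (the names below are illustrative, not part of the snippet):
# Toy reproduction of the mutable-default-argument pitfall (illustrative names).
def render(content, attrs={}):          # the default dict is shared across calls
    attrs.setdefault('class', '')
    attrs['class'] += ' label-empty'
    return attrs

print(render('a'))   # {'class': ' label-empty'}
print(render('b'))   # {'class': ' label-empty label-empty'}  <- state leaked from the first call

def render_fixed(content, attrs=None):  # the pattern used by the second variant
    attrs = attrs or {}
    attrs.setdefault('class', '')
    attrs['class'] += ' label-empty'
    return attrs

print(render_fixed('a'))  # {'class': ' label-empty'}
print(render_fixed('b'))  # {'class': ' label-empty'}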
31,756 |
def verify_and_load_scim_data(scim):
scim = json.loads(scim)
if type(scim) != dict:
raise Exception("SCIM data is not a valid JSON")
return scim
|
def verify_and_load_scim_data(scim):
scim = json.loads(scim)
if not isinstance(scim, dict):
raise Exception("SCIM data is not a valid JSON")
return scim
|
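A short example of why the isinstance form is generally preferred for this check: json.loads can be configured to return a dict subclass, which an exact type() comparison rejects.
# isinstance() accepts dict subclasses; an exact type() comparison does not.
import json
from collections import OrderedDict

scim = json.loads('{"a": 1}', object_pairs_hook=OrderedDict)
print(type(scim) == dict)        # False
print(isinstance(scim, dict))    # True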
34,845 |
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.is_dir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.is_dir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"'{out_path}' because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
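The guard logic in migrate_domain_format leans on pathlib checks (exists, is_file, is_dir). A compact reminder of how those behave, using a hypothetical path:
# Compact reminder of the pathlib checks used by the migration guard (hypothetical path).
from pathlib import Path

backup_location = Path("original_domain.yml")
print(backup_location.exists())    # True only if something is present at that path
print(backup_location.is_file())   # True only for an existing regular file
print(backup_location.is_dir())    # True only for an existing directory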
45,986 |
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this formula with a surrogate, which is fully differentiable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
.. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
target: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this formula with a surrogate, which is fully differentiable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
.. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
target: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (errors_sorted.relu() * gradient).sum(dim=1).mean()
return loss
|
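The two lovasz_hinge_loss variants differ in how the hinge term is spelled; F.relu(t) and t.relu() produce the same tensor, e.g.:
# F.relu(t) and t.relu() compute the same hinge term.
import torch
import torch.nn.functional as F

t = torch.tensor([-0.5, 0.0, 1.5])
print(torch.equal(F.relu(t), t.relu()))  # True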
58,378 |
def auto_model(layout, scan_length=None, one_vs_rest=False):
"""Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types and trial types
at the run level and then uses dummy contrasts at each other level
present to aggregate these results up.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
A BIDSLayout instance
scan_length : int
Scan length for loading event variables in cases
where the scan length can not be read from the nifti.
Primarily for testing.
one_vs_rest : bool
Set to True if you would like to autogenerate
contrasts of each trial type against every other trial type.
Returns
-------
list
list of model dictionaries for each task
"""
base_name = layout._root.name
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
model["BIDSModelVersion"]= "1.0.0"
model["Input"] = {"task": [task_name]}
nodes = []
# Make run level block
transformations = OrderedDict(
Transformer='pybids-transforms-v1',
Instructions=[
OrderedDict(
Name='Factor',
Input='trial_type'
)
]
)
run = OrderedDict(Level='Run',
Name='Run',
GroupBy=['run', 'subject'],
Transformations=transformations)
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
run_model = OrderedDict(Type='glm',
X=trial_type_factors)
# Add HRF
run_model['HRF'] = OrderedDict(
Variables=trial_type_factors,
Model="DoubleGamma",
Parameters=OrderedDict(
PeakDelay=3,
PeakDispersion=6,
UndershootDelay=10,
UndershootDispersion=12,
PeakUndershootRatio=0.2
)
)
run["Model"] = run_model
if one_vs_rest:
# If there are multiple trial types, build contrasts
contrasts = []
for tt in trial_types:
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Test"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
nodes.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Session", contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Subject", contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Dataset", contrast_names))
model["Nodes"] = nodes
task_models.append(model)
return task_models
|
def auto_model(layout, scan_length=None, one_vs_rest=False):
"""Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types and trial types
at the run level and then uses dummy contrasts at each other level
present to aggregate these results up.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
A BIDSLayout instance
scan_length : int
Scan length for loading event variables in cases
where the scan length can not be read from the nifti.
Primarily for testing.
one_vs_rest : bool
Set to True if you would like to autogenerate
contrasts of each trial type against every other trial type.
Returns
-------
list
list of model dictionaries for each task
"""
base_name = layout._root.name
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
model["BIDSModelVersion"]= "1.0.0"
model["Input"] = {"task": [task_name]}
nodes = []
# Make run level block
transformations = OrderedDict(
Transformer='pybids-transforms-v1',
Instructions=[
OrderedDict(
Name='Factor',
Input='trial_type'
)
]
)
run = OrderedDict(Level='Run',
Name='Run',
GroupBy=['run', 'subject'],
Transformations=transformations)
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
run_model = OrderedDict(Type='glm',
X=trial_type_factors)
# Add HRF
run_model['HRF'] = dict(
Variables=trial_type_factors,
Model="DoubleGamma",
Parameters=dict(
PeakDelay=3,
PeakDispersion=6,
UndershootDelay=10,
UndershootDispersion=12,
PeakUndershootRatio=0.2
)
)
run["Model"] = run_model
if one_vs_rest:
# If there are multiple trial types, build contrasts
contrasts = []
for tt in trial_types:
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Test"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
nodes.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Session", contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Subject", contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Dataset", contrast_names))
model["Nodes"] = nodes
task_models.append(model)
return task_models
|
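The two auto_model variants differ in the HRF block (OrderedDict versus a plain dict). On Python 3.7+ both preserve insertion order, so the serialized block comes out the same; a quick check with illustrative values:
# On Python 3.7+ plain dicts preserve insertion order, so both HRF blocks serialize identically.
import json
from collections import OrderedDict

a = OrderedDict(PeakDelay=3, PeakDispersion=6, UndershootDelay=10)
b = dict(PeakDelay=3, PeakDispersion=6, UndershootDelay=10)
print(list(a) == list(b))              # True
print(json.dumps(a) == json.dumps(b))  # True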
35,242 |
def format_float_positional(a, *args, **kwargs):
"""Returns the decimal string in positional notation
.. seealso:: :func:`numpy.format_float_positional`
"""
return numpy.format_float_positional(cupy.asnumpy(a), *args, **kwargs)
|
def format_float_positional(a, *args, **kwargs):
"""Returns the decimal string in positional notation
.. seealso:: :func:`numpy.format_float_positional`
"""
return numpy.format_float_positional(cupy.asnumpy(x), *args, **kwargs)
|
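For reference, the wrapped NumPy function can be called directly; a minimal example with arbitrary values:
# Minimal standalone calls to the wrapped NumPy function (arbitrary values).
import numpy

print(numpy.format_float_positional(0.1, precision=4))        # 0.1
print(numpy.format_float_positional(1234.5678, precision=2))  # 1234.57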
31,733 |
def provides_the_analysis_and_timeline_details_of_a_case_command(client, args):
case_id = str(args.get('case_id', ''))
subtenant = args.get('subtenant', None)
response = client.provides_the_analysis_and_timeline_details_of_a_case_request(case_id, subtenant)
insight_headers = [
'signal',
'description'
]
markdown = tableToMarkdown(
f"Insights for {response.get('caseId', '')}", response.get('insights', []), headers=insight_headers)
timeline_headers = [
'event_timestamp',
'category',
'title',
'field_labels',
'ip_address',
'description',
'location',
'sender',
'subject',
'title',
'flagging detectors',
'rule_name'
]
markdown += tableToMarkdown(
f"Event Timeline for {response.get('caseId', '')}", response.get('eventTimeline', []), headers=timeline_headers)
command_results = CommandResults(
readable_output=markdown,
outputs_prefix='AbnormalSecurity.CaseAnalysis',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
|
def provides_the_analysis_and_timeline_details_of_a_case_command(client, args):
case_id = str(args.get('case_id', ''))
subtenant = args.get('subtenant', None)
response = client.provides_the_analysis_and_timeline_details_of_a_case_request(case_id, subtenant)
insight_headers = [
'signal',
'description'
]
markdown = tableToMarkdown(
f"Insights for {response.get('caseId', '')}", response.get('insights', []), headers=insight_headers)
timeline_headers = [
'event_timestamp',
'category',
'title',
'field_labels',
'ip_address',
'description',
'location',
'sender',
'subject',
'title',
'flagging detectors',
'rule_name'
]
markdown += tableToMarkdown(
f"Event Timeline for {response.get('caseId', '')}", response.get('eventTimeline', []), headers=timeline_headers)
command_results = CommandResults(
readable_output=markdown,
outputs_prefix='AbnormalSecurity.CaseAnalysis',
outputs_key_field='caseId',
outputs=response,
raw_response=response
)
return command_results
|
46,526 |
def test_finality_rule_2(state):
# get past first two epochs that finality does not run on
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
old_previous_justified_epoch = test_state.previous_justified_epoch
old_previous_justified_root = test_state.previous_justified_root
if epoch == 0:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
check_finality(test_state, prev_state, True, False, False)
if epoch == 1:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
check_finality(test_state, prev_state, False, True, False)
if epoch == 2:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
# finalized by rule 2
check_finality(test_state, prev_state, True, False, True)
assert test_state.finalized_epoch == old_previous_justified_epoch
assert test_state.finalized_root == old_previous_justified_root
blocks += new_blocks
return pre_state, blocks, test_state
|
def test_finality_rule_2(state):
# get past first two epochs that finality does not run on
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
old_previous_justified_epoch = test_state.previous_justified_epoch
old_previous_justified_root = test_state.previous_justified_root
if epoch == 0:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
check_finality(test_state, prev_state, True, False, False)
if epoch == 1:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
check_finality(test_state, prev_state, False, True, False)
elif epoch == 2:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
# finalized by rule 2
check_finality(test_state, prev_state, True, False, True)
assert test_state.finalized_epoch == old_previous_justified_epoch
assert test_state.finalized_root == old_previous_justified_root
blocks += new_blocks
return pre_state, blocks, test_state
|
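The difference in this pair is if versus elif on the epoch == 2 branch; because the epoch values are mutually exclusive the outcome is the same, and elif merely skips evaluating the remaining condition once an earlier branch matched. A tiny illustration with made-up helper names:
# elif differs from a second if only in that its condition is skipped once an earlier branch matched.
calls = []

def matches(label, value, expected):
    calls.append(label)
    return value == expected

epoch = 0
if matches("first", epoch, 0):
    pass
elif matches("second", epoch, 1):  # never evaluated: the first branch already matched
    pass
print(calls)  # ['first']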
11,493 |
def sample_delete_project():
from azure.ai.language.text.authoring import TextAuthoringClient
from azure.core.credentials import AzureKeyCredential
import os
endpoint = os.environ["AZURE_TEXT_AUTHORING_ENDPOINT"]
key = os.environ["AZURE_TEXT_AUTHORING_KEY"]
client = TextAuthoringClient(endpoint, AzureKeyCredential(key)).text_analysis_authoring
project_name = "Project_Name"
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
if(project_name not in created_projects_names):
print("The project cannot be deleted because no such project exists")
return
poller = client.begin_delete_project(project_name)
poller.result() #Waits for the project to get deleted
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
if(project_name not in created_projects_names):
print("The project is deleted successfully")
else:
print("An error has occured")
|
def sample_delete_project():
from azure.ai.language.text.authoring import TextAuthoringClient
from azure.core.credentials import AzureKeyCredential
import os
endpoint = os.environ["AZURE_TEXT_AUTHORING_ENDPOINT"]
key = os.environ["AZURE_TEXT_AUTHORING_KEY"]
client = TextAuthoringClient(endpoint, AzureKeyCredential(key)).text_analysis_authoring
project_name = "Project_Name"
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
if project_name not in created_projects_names:
print("The project cannot be deleted because no such project exists")
return
poller = client.begin_delete_project(project_name)
poller.result() #Waits for the project to get deleted
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
if(project_name not in created_projects_names):
print("The project is deleted successfully")
else:
print("An error has occured")
|
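Both sample_delete_project variants rebuild the map object before each membership test, which matters because map returns a one-shot iterator; a short illustration with hypothetical data:
# map() returns a one-shot iterator: a membership test consumes it.
projects = [{"projectName": "A"}, {"projectName": "B"}]
names = map(lambda p: p["projectName"], projects)
print("B" in names)   # True
print("B" in names)   # False - the iterator is already exhausted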
36,872 |
def test_info_in_repo(repo_dir, dvc_repo, caplog):
# adding a file so that dvc creates `.dvc/cache`, that is needed for proper
# supported link types check.
assert main(["add", repo_dir.FOO]) == 0
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
assert re.search(re.compile(r"Package Manager: .*"), caplog.text)
assert re.search(
re.compile(r"(Cache: (.*link - (True|False)(,\s)?){3})"), caplog.text
)
|
def test_info_in_repo(repo_dir, dvc_repo, caplog):
# adding a file so that dvc creates `.dvc/cache`, that is needed for proper
# supported link types check.
assert main(["add", repo_dir.FOO]) == 0
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
assert re.search(re.compile(r"Package manager: .*"), caplog.text)
assert re.search(
re.compile(r"(Cache: (.*link - (True|False)(,\s)?){3})"), caplog.text
)
|
6,577 |
def get_basic_details(args, item, overwrite_warehouse=True):
"""
:param args: {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"price_list_uom_dependant": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"ignore_pricing_rule": 0/1
"project": "",
barcode: "",
serial_no: "",
currency: "",
update_stock: "",
price_list: "",
company: "",
order_type: "",
is_pos: "",
project: "",
qty: "",
stock_qty: "",
conversion_factor: "",
against_blanket_order: 0/1
}
:param item: `item_code` of Item object
:return: frappe._dict
"""
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
item_defaults = get_item_defaults(item.name, args.company)
item_group_defaults = get_item_group_defaults(item.name, args.company)
brand_defaults = get_brand_defaults(item.name, args.company)
defaults = frappe._dict({
'item_defaults': item_defaults,
'item_group_defaults': item_group_defaults,
'brand_defaults': brand_defaults
})
warehouse = get_item_warehouse(item, args, overwrite_warehouse, defaults)
if args.get('doctype') == "Material Request" and not args.get('material_request_type'):
args['material_request_type'] = frappe.db.get_value('Material Request',
args.get('name'), 'material_request_type', cache=True)
expense_account = None
if args.get('doctype') == 'Purchase Invoice' and item.is_fixed_asset:
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
expense_account = get_asset_category_account(fieldname = "fixed_asset_account", item = args.item_code, company= args.company)
#Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
if not args.get('uom'):
if args.get('doctype') in sales_doctypes:
args.uom = item.sales_uom if item.sales_uom else item.stock_uom
elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \
(args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'):
args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
else:
args.uom = item.stock_uom
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": warehouse,
"income_account": get_default_income_account(args, item_defaults, item_group_defaults, brand_defaults),
"expense_account": expense_account or get_default_expense_account(args, item_defaults, item_group_defaults, brand_defaults) ,
"cost_center": get_default_cost_center(args, item_defaults, item_group_defaults, brand_defaults),
'has_serial_no': item.has_serial_no,
'has_batch_no': item.has_batch_no,
"batch_no": args.get("batch_no"),
"uom": args.uom,
"min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
"qty": flt(args.qty) or 1.0,
"stock_qty": flt(args.qty) or 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0,
"supplier": get_default_supplier(args, item_defaults, item_group_defaults, brand_defaults),
"update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0,
"delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0,
"is_fixed_asset": item.is_fixed_asset,
"last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0,
"transaction_date": args.get("transaction_date"),
"against_blanket_order": args.get("against_blanket_order"),
"bom_no": item.get("default_bom"),
"weight_per_unit": item.get("weight_per_unit"),
"weight_uom": args.get("weight_uom") or item.get("weight_uom")
})
if item.get("enable_deferred_revenue") or item.get("enable_deferred_expense"):
out.update(calculate_service_end_date(args, item))
# calculate conversion factor
if item.stock_uom == args.uom:
out.conversion_factor = 1.0
else:
out.conversion_factor = args.conversion_factor or \
get_conversion_factor(item.name, args.uom).get("conversion_factor")
args.conversion_factor = out.conversion_factor
out.stock_qty = out.qty * out.conversion_factor
# calculate last purchase rate
if args.get('doctype') in purchase_doctypes:
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate
out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate, item.name, out.conversion_factor)
# if default specified in item is for another company, fetch from company
for d in [
["Account", "income_account", "default_income_account"],
["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"],
["Warehouse", "warehouse", ""]]:
if not out[d[1]]:
out[d[1]] = frappe.get_cached_value('Company', args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
if args.get("manufacturer"):
part_no = get_item_manufacturer_part_no(args.get("item_code"), args.get("manufacturer"))
if part_no:
out["manufacturer_part_no"] = part_no
else:
out["manufacturer_part_no"] = None
out["manufacturer"] = None
else:
data = frappe.get_value("Item", item.name,
["default_item_manufacturer", "default_manufacturer_part_no"] , as_dict=1)
if data:
out.update({
"manufacturer": data.default_item_manufacturer,
"manufacturer_part_no": data.default_manufacturer_part_no
})
child_doctype = args.doctype + ' Item'
meta = frappe.get_meta(child_doctype)
if meta.get_field("barcode"):
update_barcode_value(out)
if out.get("weight_per_unit"):
out['total_weight'] = out.weight_per_unit * out.stock_qty
return out
|
def get_basic_details(args, item, overwrite_warehouse=True):
"""
:param args: {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"price_list_uom_dependant": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"ignore_pricing_rule": 0/1
"project": "",
barcode: "",
serial_no: "",
currency: "",
update_stock: "",
price_list: "",
company: "",
order_type: "",
is_pos: "",
project: "",
qty: "",
stock_qty: "",
conversion_factor: "",
against_blanket_order: 0/1
}
:param item: `item_code` of Item object
:return: frappe._dict
"""
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
item_defaults = get_item_defaults(item.name, args.company)
item_group_defaults = get_item_group_defaults(item.name, args.company)
brand_defaults = get_brand_defaults(item.name, args.company)
defaults = frappe._dict({
'item_defaults': item_defaults,
'item_group_defaults': item_group_defaults,
'brand_defaults': brand_defaults
})
warehouse = get_item_warehouse(item, args, overwrite_warehouse, defaults)
if args.get('doctype') == "Material Request" and not args.get('material_request_type'):
args['material_request_type'] = frappe.db.get_value('Material Request',
args.get('name'), 'material_request_type', cache=True)
expense_account = None
if args.get('doctype') == 'Purchase Invoice' and item.is_fixed_asset:
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
expense_account = get_asset_category_account(fieldname = "fixed_asset_account", item = args.item_code, company= args.company)
#Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
if not args.get('uom'):
if args.get('doctype') in sales_doctypes:
args.uom = item.sales_uom if item.sales_uom else item.stock_uom
elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \
(args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'):
args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
else:
args.uom = item.stock_uom
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": warehouse,
"income_account": get_default_income_account(args, item_defaults, item_group_defaults, brand_defaults),
"expense_account": expense_account or get_default_expense_account(args, item_defaults, item_group_defaults, brand_defaults) ,
"cost_center": get_default_cost_center(args, item_defaults, item_group_defaults, brand_defaults),
'has_serial_no': item.has_serial_no,
'has_batch_no': item.has_batch_no,
"batch_no": args.get("batch_no"),
"uom": args.uom,
"min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
"qty": flt(args.qty) or 1.0,
"stock_qty": flt(args.qty) or 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0,
"supplier": get_default_supplier(args, item_defaults, item_group_defaults, brand_defaults),
"update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0,
"delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0,
"is_fixed_asset": item.is_fixed_asset,
"last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0,
"transaction_date": args.get("transaction_date"),
"against_blanket_order": args.get("against_blanket_order"),
"bom_no": item.get("default_bom"),
"weight_per_unit": item.get("weight_per_unit"),
"weight_uom": item.get("weight_uom")
})
if item.get("enable_deferred_revenue") or item.get("enable_deferred_expense"):
out.update(calculate_service_end_date(args, item))
# calculate conversion factor
if item.stock_uom == args.uom:
out.conversion_factor = 1.0
else:
out.conversion_factor = args.conversion_factor or \
get_conversion_factor(item.name, args.uom).get("conversion_factor")
args.conversion_factor = out.conversion_factor
out.stock_qty = out.qty * out.conversion_factor
# calculate last purchase rate
if args.get('doctype') in purchase_doctypes:
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate
out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate, item.name, out.conversion_factor)
# if default specified in item is for another company, fetch from company
for d in [
["Account", "income_account", "default_income_account"],
["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"],
["Warehouse", "warehouse", ""]]:
if not out[d[1]]:
out[d[1]] = frappe.get_cached_value('Company', args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
if args.get("manufacturer"):
part_no = get_item_manufacturer_part_no(args.get("item_code"), args.get("manufacturer"))
if part_no:
out["manufacturer_part_no"] = part_no
else:
out["manufacturer_part_no"] = None
out["manufacturer"] = None
else:
data = frappe.get_value("Item", item.name,
["default_item_manufacturer", "default_manufacturer_part_no"] , as_dict=1)
if data:
out.update({
"manufacturer": data.default_item_manufacturer,
"manufacturer_part_no": data.default_manufacturer_part_no
})
child_doctype = args.doctype + ' Item'
meta = frappe.get_meta(child_doctype)
if meta.get_field("barcode"):
update_barcode_value(out)
if out.get("weight_per_unit"):
out['total_weight'] = out.weight_per_unit * out.stock_qty
return out
|
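A tiny illustrative sketch (made-up numbers, plain Python, no ERPNext dependencies) of the conversion-factor arithmetic performed above: the stock quantity is just the entered quantity scaled by the UOM conversion factor.
def to_stock_qty(qty: float, conversion_factor: float) -> float:
    # e.g. qty entered in "Box" with 12 "Nos" per box -> 24 Nos in the stock UOM
    return qty * conversion_factor
assert to_stock_qty(2, 12) == 24
assert to_stock_qty(5, 1.0) == 5.0   # stock UOM equals transaction UOM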
24,332 |
def discover_instances(config, interval, check_ref):
# type: (InstanceConfig, float, weakref.ref[SnmpCheck]) -> None
"""Function looping over a subnet to discover devices, meant to run in a thread.
This is extracted from the check class to not keep a strong reference to
the check instance. This way if the agent unschedules the check and deletes
the reference to the instance, the check is garbage collected properly and
that function can stop.
"""
while True:
start_time = time.time()
for host in config.network_hosts():
check = check_ref()
if check is None or not check._running:
return
host_config = check._build_autodiscovery_config(config.instance, host)
try:
sys_object_oid = check.fetch_sysobject_oid(host_config)
except Exception as e:
check.log.debug("Error scanning host %s: %s", host, e)
del check
continue
try:
profile = check._profile_for_sysobject_oid(sys_object_oid)
except ConfigurationError:
if not (host_config.oid_config.scalar_oids or host_config.oid_config.bulk_oids):
check.log.warning("Host %s didn't match a profile for sysObjectID %s", host, sys_object_oid)
del check
continue
else:
host_config.refresh_with_profile(check.profiles[profile])
host_config.add_profile_tag(profile)
config.discovered_instances[host] = host_config
write_persistent_cache(check.check_id, json.dumps(list(config.discovered_instances)))
del check
check = check_ref()
if check is None:
return
# Write again at the end of the loop, in case some hosts have been removed since the last write
write_persistent_cache(check.check_id, json.dumps(list(config.discovered_instances)))
del check
time_elapsed = time.time() - start_time
if interval - time_elapsed > 0:
time.sleep(interval - time_elapsed)
|
def discover_instances(config, interval, check_ref):
# type: (InstanceConfig, float, weakref.ref[SnmpCheck]) -> None
"""Function looping over a subnet to discover devices, meant to run in a thread.
This is extracted from the check class to not keep a strong reference to
the check instance. This way if the agent unschedules the check and deletes
the reference to the instance, the check is garbage collected properly and
that function can stop.
"""
while True:
start_time = time.time()
for host in config.network_hosts():
check = check_ref()
if check is None or not check._running:
return
host_config = check._build_autodiscovery_config(config.instance, host)
try:
sys_object_oid = check.fetch_sysobject_oid(host_config)
except Exception as e:
check.log.debug("Error scanning host %s: %s", host, e)
del check
continue
try:
profile = check._profile_for_sysobject_oid(sys_object_oid)
except ConfigurationError:
if not host_config.oid_config.has_oids():
check.log.warning("Host %s didn't match a profile for sysObjectID %s", host, sys_object_oid)
del check
continue
else:
host_config.refresh_with_profile(check.profiles[profile])
host_config.add_profile_tag(profile)
config.discovered_instances[host] = host_config
write_persistent_cache(check.check_id, json.dumps(list(config.discovered_instances)))
del check
check = check_ref()
if check is None:
return
# Write again at the end of the loop, in case some hosts have been removed since the last write
write_persistent_cache(check.check_id, json.dumps(list(config.discovered_instances)))
del check
time_elapsed = time.time() - start_time
if interval - time_elapsed > 0:
time.sleep(interval - time_elapsed)
|
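A minimal, self-contained sketch of the weak-reference pattern described in the docstring above (the class and names are illustrative, not the real SNMP check): the worker thread re-resolves the weak reference on every pass and drops it immediately, so it never keeps the check instance alive by itself.
import threading
import time
import weakref
class Check:
    """Stand-in for the real check object; illustrative only."""
    def __init__(self):
        self._running = True
def worker(check_ref):
    # Resolve the weak reference on every pass and drop it right away,
    # so the thread never holds a strong reference across sleeps.
    while True:
        check = check_ref()
        if check is None or not check._running:
            return
        # ... one unit of discovery work would happen here ...
        del check
        time.sleep(0.1)
check = Check()
thread = threading.Thread(target=worker, args=(weakref.ref(check),))
thread.start()
check._running = False   # signal the thread to stop on its next pass
thread.join()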
42,679 |
def query_token_spam_list(db: 'DBHandler') -> Set[EthereumToken]:
"""Generate a set of assets that can be ignored combining information of cryptoscamdb
and the list of spam assets KNOWN_ETH_SPAM_TOKENS. This function also makes sure to get the
bad assets in the list of cryptoscamdb and ensures that they exist in the globaldb before
trying to add them.
TODO
This function tries to add as assets to the globaldb the tokens listed in
KNOWN_ETH_SPAM_TOKENS and not the ones coming from cryptoscamdb. The reason is that until the
v2 of the API the response contains both spam addresses and tokens and there is no way to know
if the address is for a contract or not. Checking if the address is a contract takes to much
time. When V2 gets released this can be fixed.
May raise:
- RemoteError
"""
try:
response = requests.get(
url='https://api.cryptoscamdb.org/v1/addresses',
timeout=DEFAULT_TIMEOUT_TUPLE,
)
data = response.json()
success, tokens_info = data['success'], data['result']
except requests.exceptions.RequestException as e:
raise RemoteError(f'Failed to retrieve information from cryptoscamdb. {str(e)}') from e
except (DeserializationError, JSONDecodeError) as e:
raise RemoteError(f'Failed to deserialize data from cryptoscamdb. {str(e)}') from e
except KeyError as e:
raise RemoteError(
f'Response from cryptoscamdb doesn\'t contain expected key. {str(e)}',
) from e
if success is False:
log.error(f'Failed to deserialize data from cryptoscamdb. {data}')
raise RemoteError(
'Failed to deserialize data from cryptoscamdb. Check the logs '
'to get more information',
)
tokens_to_ignore = set()
for token_addr, token_data in tokens_info.items():
if not token_addr.startswith('0x') or token_data[0]['type'] != 'scam':
continue
try:
checksumed_address = to_checksum_address(token_addr)
except ValueError as e:
log.debug(f'Failed to read address from cryptoscamdb. {str(e)}')
continue
try:
token = EthereumToken(checksumed_address)
except UnknownAsset:
continue
if token is not None:
tokens_to_ignore.add(token)
# Try to add custom list
for token_address in KNOWN_ETH_SPAM_TOKENS:
try:
own_token = get_or_create_ethereum_token(
userdb=db,
ethereum_address=token_address,
protocol=SPAM_PROTOCOL,
)
except (RemoteError, NotERC20Conformant) as e:
log.debug(f'Skipping {checksumed_address} due to {str(e)}')
continue
if own_token is not None:
tokens_to_ignore.add(own_token)
return tokens_to_ignore
|
def query_token_spam_list(db: 'DBHandler') -> Set[EthereumToken]:
"""Generate a set of assets that can be ignored combining information of cryptoscamdb
and the list of spam assets KNOWN_ETH_SPAM_TOKENS. This function also makes sure to get the
bad assets in the list of cryptoscamdb and ensures that they exist in the globaldb before
trying to add them.
TODO
This function tries to add as assets to the globaldb the tokens listed in
KNOWN_ETH_SPAM_TOKENS and not the ones coming from cryptoscamdb. The reason is that until the
v2 of the API the response contains both spam addresses and tokens and there is no way to know
if the address is for a contract or not. Checking if the address is a contract takes too much
time. When V2 gets released this can be fixed.
May raise:
- RemoteError
"""
try:
response = requests.get(
url='https://api.cryptoscamdb.org/v1/addresses',
timeout=DEFAULT_TIMEOUT_TUPLE,
)
data = response.json()
success, tokens_info = data['success'], data['result']
except requests.exceptions.RequestException as e:
raise RemoteError(f'Failed to retrieve information from cryptoscamdb. {str(e)}') from e
except (DeserializationError, JSONDecodeError) as e:
raise RemoteError(f'Failed to deserialize data from cryptoscamdb. {str(e)}') from e
except KeyError as e:
raise RemoteError(
f'Response from cryptoscamdb doesn\'t contain expected key. {str(e)}',
) from e
if success is False:
log.error(f'Failed to deserialize data from cryptoscamdb. {data}')
raise RemoteError(
'Failed to deserialize data from cryptoscamdb. Check the logs '
'to get more information',
)
tokens_to_ignore = set()
for token_addr, token_data in tokens_info.items():
if not token_addr.startswith('0x') or token_data[0]['type'] != 'scam':
continue
try:
checksumed_address = to_checksum_address(token_addr)
except ValueError as e:
log.debug(f'Failed to read address from cryptoscamdb. {str(e)}')
continue
try:
token = EthereumToken(checksumed_address)
except UnknownAsset:
continue
if token is not None:
tokens_to_ignore.add(token)
# Try to add custom list
for token_address in KNOWN_ETH_SPAM_TOKENS:
try:
own_token = get_or_create_ethereum_token(
userdb=db,
ethereum_address=token_address,
protocol=SPAM_PROTOCOL,
)
except (RemoteError, NotERC20Conformant) as e:
log.debug(f'Skipping {checksumed_address} due to {str(e)}')
continue
if own_token is not None:
tokens_to_ignore.add(own_token)
return tokens_to_ignore
|
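A small standalone sketch of the address-filtering step above, with made-up payload data and assuming `eth_utils` provides `to_checksum_address` as in the snippet: only `0x` keys flagged as scams survive, and anything that fails checksumming is skipped.
from eth_utils import to_checksum_address  # assumed dependency, mirroring the snippet above
sample_result = {  # shape mirrors cryptoscamdb's `result` payload; values are invented
    '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae': [{'type': 'scam'}],
    'phishing-site.example.com': [{'type': 'scam'}],
    '0x0000000000000000000000000000000000000001': [{'type': 'other'}],
}
scam_addresses = set()
for addr, entries in sample_result.items():
    if not addr.startswith('0x') or entries[0]['type'] != 'scam':
        continue
    try:
        scam_addresses.add(to_checksum_address(addr))
    except ValueError:
        continue
print(scam_addresses)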
27,395 |
def test_wrap_id(translate_universes):
# If the translation is given a different timestep from what is has, the timestep will
# be updated to the new one given. This should only matter in a ChainReader
ref_u, trans_u = translate_universes
trans_u.dimensions = [363., 364., 365., 90., 90., 90.]
ref_u.dimensions = [363., 364., 365., 90., 90., 90.]
ag = trans_u.residues[24].atoms
trans = wrap(ag)
id_trans = id(trans_u.trajectory.ts)
trans = trans._transform(ref_u.trajectory.ts)
assert id_trans != id(trans_u.trajectory.ts) and id(trans_u.trajectory.ts) == id(ref_u.trajectory.ts)
|
def test_wrap_id(translate_universes):
# If the translation is given a different timestep from what it has, the timestep will
# be updated to the new one given. This should only matter in a ChainReader
ref_u, trans_u = translate_universes
trans_u.dimensions = [363., 364., 365., 90., 90., 90.]
ref_u.dimensions = [363., 364., 365., 90., 90., 90.]
ag = trans_u.residues[24].atoms
trans = wrap(ag)
id_trans = id(trans_u.trajectory.ts)
trans = trans._transform(ref_u.trajectory.ts)
assert id_trans != id(trans_u.trajectory.ts) and id(trans_u.trajectory.ts) == id(ref_u.trajectory.ts)
|
12,149 |
def create_dmd_from_xml(job, path, base_directory_path, state):
# TODO:
# - XML association:
# - Avoid filenames mismatch (metadata XML filename may have changed).
# - Consider equal filenames from different transfer sources?
# - Allow multiple XML metadata per file?
# - Use extra CSV to relate XML file(s)?
xml_filename = os.path.splitext(os.path.relpath(path, "objects"))[0] + ".xml"
transfer_metadata = os.path.join(
base_directory_path, os.path.join("objects", "metadata", "transfers")
)
if not os.path.isdir(transfer_metadata):
return
for dir in os.listdir(transfer_metadata):
xml_path = os.path.join(transfer_metadata, dir, xml_filename)
if not os.path.isfile(xml_path):
continue
try:
tree = etree.parse(xml_path)
valid, errors = _validate_xml(tree)
if not valid:
job.pyprint(
"Errors encountered validating {}:".format(xml_path),
file=sys.stderr,
)
for error in errors:
job.pyprint("\t- {}".format(error), file=sys.stderr)
return
except etree.LxmlError as err:
job.pyprint(
"Could not parse or validate {}\n\t- {}".format(xml_path, err),
file=sys.stderr,
)
return
root = tree.getroot()
_, _, tag = root.tag.partition("}")
state.globalDmdSecCounter += 1
DMDID = "dmdSec_" + state.globalDmdSecCounter.__str__()
dmd_sec = etree.Element(ns.metsBNS + "dmdSec", ID=DMDID)
state.dmdSecs.append(dmd_sec)
md_wrap = etree.SubElement(dmd_sec, ns.metsBNS + "mdWrap")
md_wrap.set("MDTYPE", "OTHER")
md_wrap.set("OTHERMDTYPE", tag.upper())
xml_data = etree.SubElement(md_wrap, ns.metsBNS + "xmlData")
xml_data.append(root)
return DMDID
|
def create_dmd_from_xml(job, path, base_directory_path, state):
# TODO:
# - XML association:
# - Avoid filenames mismatch (metadata XML filename may have changed).
# - Consider equal filenames from different transfer sources?
# - Allow multiple XML metadata per file?
# - Use extra CSV to relate XML file(s)?
xml_filename = os.path.splitext(os.path.relpath(path, "objects"))[0] + ".xml"
transfer_metadata = os.path.join(
base_directory_path, os.path.join("objects", "metadata", "transfers")
)
if not os.path.isdir(transfer_metadata):
return
for dir in os.listdir(transfer_metadata):
xml_path = os.path.join(transfer_metadata, dir, xml_filename)
if not os.path.isfile(xml_path):
continue
try:
tree = etree.parse(xml_path)
valid, errors = _validate_xml(tree)
if not valid:
job.pyprint(
"Errors encountered validating {}:".format(xml_path),
file=sys.stderr,
)
for error in errors:
job.pyprint("\t- {}".format(error), file=sys.stderr)
return
except etree.LxmlError as err:
job.pyprint(
"Could not parse or validate {}\n\t- {}".format(xml_path, err),
file=sys.stderr,
)
return
root = tree.getroot()
_, _, tag = root.tag.partition("}")
state.globalDmdSecCounter += 1
DMDID = "dmdSec_{}".format(state.globalDmdSecCounter)
dmd_sec = etree.Element(ns.metsBNS + "dmdSec", ID=DMDID)
state.dmdSecs.append(dmd_sec)
md_wrap = etree.SubElement(dmd_sec, ns.metsBNS + "mdWrap")
md_wrap.set("MDTYPE", "OTHER")
md_wrap.set("OTHERMDTYPE", tag.upper())
xml_data = etree.SubElement(md_wrap, ns.metsBNS + "xmlData")
xml_data.append(root)
return DMDID
|
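For reference, a minimal lxml sketch of the dmdSec/mdWrap/xmlData nesting built above; the METS namespace URI is written out here because the real module pulls it from its `ns` helper, and the embedded Dublin Core record is invented for the example.
from lxml import etree
METS = "{http://www.loc.gov/METS/}"  # the real code uses ns.metsBNS
source = etree.fromstring(
    '<dc xmlns="http://purl.org/dc/elements/1.1/"><title>Example record</title></dc>'
)
dmd_sec = etree.Element(METS + "dmdSec", ID="dmdSec_1")
md_wrap = etree.SubElement(dmd_sec, METS + "mdWrap", MDTYPE="OTHER", OTHERMDTYPE="DC")
xml_data = etree.SubElement(md_wrap, METS + "xmlData")
xml_data.append(source)
print(etree.tostring(dmd_sec, pretty_print=True).decode())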
31,476 |
def get_access_token(refresh_token: str):
""" Gets access token from os environment if it was saved there. Else, generates access token from refresh token.
Args:
refresh_token(str): refresh token to obtain access token, to send mails using gmail API
Returns:
access_token(str): access token is used to send mails using gmail API
"""
access_token = os.getenv('ACCESS_TOKEN')
valid_until = int(os.getenv('VALID_UNTIL')) if os.getenv('VALID_UNTIL') else None
# check if access token is valid
if access_token and valid_until:
if int(datetime.now().timestamp()) < valid_until:
return access_token
if not refresh_token:
print(f"Error obtaining access token. Failed sending mails.")
sys.exit(1)
# else access token should be obtained from refresh token
http_client = httplib2.Http()
body = {
'refresh_token': refresh_token,
'client_id': GMAIL_CLIENT_ID,
'grant_type': 'refresh_token',
}
resp, content = http_client.request(TOKEN_URL, "POST", urllib.parse.urlencode(body), TOKEN_FORM_HEADERS)
if resp.status not in [200, 201]:
print(f"Error obtaining access token. Failed sending mails.")
sys.exit(1)
parsed_response = json.loads(content)
access_token = parsed_response.get('access_token')
expires_in = parsed_response.get('expires_in', 3595)
time_now = int(datetime.now().timestamp())
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
# set environment variables
os.environ['ACCESS_TOKEN'] = access_token
os.environ['VALID_UNTIL'] = str(time_now + expires_in)
return access_token
|
def get_access_token(refresh_token: str):
""" Gets access token from os environment if it was saved there. Else, generates access token from refresh token.
Args:
refresh_token(str): refresh token to obtain access token, to send mails using gmail API
Returns:
access_token(str): access token is used to send mails using gmail API
"""
access_token = os.getenv('ACCESS_TOKEN')
valid_until = int(os.getenv('VALID_UNTIL')) if os.getenv('VALID_UNTIL') else None
# check if access token is valid
if access_token and valid_until:
if int(datetime.now().timestamp()) < valid_until:
return access_token
if not refresh_token:
print("Failed to send emails: Error obtaining access token - refresh token wasn't received.")
sys.exit(1)
# else access token should be obtained from refresh token
http_client = httplib2.Http()
body = {
'refresh_token': refresh_token,
'client_id': GMAIL_CLIENT_ID,
'grant_type': 'refresh_token',
}
resp, content = http_client.request(TOKEN_URL, "POST", urllib.parse.urlencode(body), TOKEN_FORM_HEADERS)
if resp.status not in [200, 201]:
print(f"Error obtaining access token. Failed sending mails.")
sys.exit(1)
parsed_response = json.loads(content)
access_token = parsed_response.get('access_token')
expires_in = parsed_response.get('expires_in', 3595)
time_now = int(datetime.now().timestamp())
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
# set environment variables
os.environ['ACCESS_TOKEN'] = access_token
os.environ['VALID_UNTIL'] = str(time_now + expires_in)
return access_token
|
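A compact sketch of the expiry bookkeeping used above (the function name is hypothetical): the token is treated as stale a few seconds before the provider's `expires_in` actually elapses.
from datetime import datetime
def compute_valid_until(expires_in: int, buffer_seconds: int = 5) -> int:
    # Shorten the advertised lifetime slightly so a token is never used
    # right at its expiry boundary.
    if expires_in - buffer_seconds > 0:
        expires_in -= buffer_seconds
    return int(datetime.now().timestamp()) + expires_in
print(compute_valid_until(3595))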
21,612 |
def _parse_oidc_config_dict(oidc_config: JsonDict) -> "OidcProviderConfig":
"""Take the configuration dict and parse it into an OidcProviderConfig
Raises:
ConfigError if the configuration is malformed.
"""
ump_config = oidc_config.get("user_mapping_provider", {})
ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
ump_config.setdefault("config", {})
(user_mapping_provider_class, user_mapping_provider_config,) = load_module(
ump_config, ("oidc_config", "user_mapping_provider")
)
# Ensure loaded user mapping module has defined all necessary methods
required_methods = [
"get_remote_user_id",
"map_user_attributes",
]
missing_methods = [
method
for method in required_methods
if not hasattr(user_mapping_provider_class, method)
]
if missing_methods:
raise ConfigError(
"Class specified by oidc_config."
"user_mapping_provider.module is missing required "
"methods: %s" % (", ".join(missing_methods),)
)
# MSC2858 will apply certain limits on what can be used as an IdP id, so let's
# enforce those limits now.
idp_id = oidc_config.get("idp_id", "oidc")
valid_idp_chars = set(string.ascii_letters + string.digits + "-._~")
if any(c not in valid_idp_chars for c in idp_id):
raise ConfigError('idp_id may only contain A-Z, a-z, 0-9, "-", ".", "_", "~"')
if len(idp_id) < 0:
raise ConfigError("idp_id must have at least one character")
if len(idp_id) > 128:
raise ConfigError("idp_id may not be more than 128 characters")
return OidcProviderConfig(
idp_id=idp_id,
idp_name=oidc_config.get("idp_name", "OIDC"),
discover=oidc_config.get("discover", True),
issuer=oidc_config["issuer"],
client_id=oidc_config["client_id"],
client_secret=oidc_config["client_secret"],
client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
scopes=oidc_config.get("scopes", ["openid"]),
authorization_endpoint=oidc_config.get("authorization_endpoint"),
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
user_mapping_provider_class=user_mapping_provider_class,
user_mapping_provider_config=user_mapping_provider_config,
)
|
def _parse_oidc_config_dict(oidc_config: JsonDict) -> "OidcProviderConfig":
"""Take the configuration dict and parse it into an OidcProviderConfig
Raises:
ConfigError if the configuration is malformed.
"""
ump_config = oidc_config.get("user_mapping_provider", {})
ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
ump_config.setdefault("config", {})
(user_mapping_provider_class, user_mapping_provider_config,) = load_module(
ump_config, ("oidc_config", "user_mapping_provider")
)
# Ensure loaded user mapping module has defined all necessary methods
required_methods = [
"get_remote_user_id",
"map_user_attributes",
]
missing_methods = [
method
for method in required_methods
if not hasattr(user_mapping_provider_class, method)
]
if missing_methods:
raise ConfigError(
"Class specified by oidc_config."
"user_mapping_provider.module is missing required "
"methods: %s" % (", ".join(missing_methods),)
)
# MSC2858 will apply certain limits on what can be used as an IdP id, so let's
# enforce those limits now.
idp_id = oidc_config.get("idp_id", "oidc")
valid_idp_chars = set(string.ascii_letters + string.digits + "-._~")
if any(c not in valid_idp_chars for c in idp_id):
raise ConfigError('idp_id may only contain A-Z, a-z, 0-9, "-", ".", "_", "~"')
if not len(idp_id):
raise ConfigError("idp_id must have at least one character")
if len(idp_id) > 128:
raise ConfigError("idp_id may not be more than 128 characters")
return OidcProviderConfig(
idp_id=idp_id,
idp_name=oidc_config.get("idp_name", "OIDC"),
discover=oidc_config.get("discover", True),
issuer=oidc_config["issuer"],
client_id=oidc_config["client_id"],
client_secret=oidc_config["client_secret"],
client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
scopes=oidc_config.get("scopes", ["openid"]),
authorization_endpoint=oidc_config.get("authorization_endpoint"),
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
user_mapping_provider_class=user_mapping_provider_class,
user_mapping_provider_config=user_mapping_provider_config,
)
|
44,782 |
def is_uri(string):
parsed_uri = urllib.parse.urlparse(string)
if os.name == 'nt':
return not re.match('^[a-zA-Z]:\\\\.*', string)
else:
return len(parsed_uri.scheme) > 0
|
def is_uri(string):
parsed_uri = urllib.parse.urlparse(string)
if os.name == 'nt':
return not re.match(r'^[a-zA-Z]:\\.*', string)
else:
return len(parsed_uri.scheme) > 0
|
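A quick check of the regex change above (paths are illustrative): the raw-string form and the doubly escaped form compile to the same pattern, the raw string just makes the single escaped backslash easier to read.
import re
old = '^[a-zA-Z]:\\\\.*'   # escapes resolved by Python to: ^[a-zA-Z]:\\.*
new = r'^[a-zA-Z]:\\.*'    # raw string: identical pattern text
assert old == new
assert re.match(new, r'C:\Users\example')
assert not re.match(new, 'https://example.com/path')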
13,880 |
def test_non_existing_directory_txt(capsys):
helper_test_non_existing_directory_output(capsys, '--xml')
|
def test_non_existing_directory_txt(capsys):
helper_test_non_existing_directory_output(capsys, '--txt')
|
1,640 |
def test_infer_dim_mle():
# Test small eigenvalues when 'mle' with pathelogical 'X' dataset
X, _ = datasets.make_classification(n_informative=1, n_repeated=18,
n_redundant=1, n_clusters_per_class=1,
random_state=42)
pca = PCA(n_components='mle').fit(X)
assert pca.n_components_ == 0
|
def test_infer_dim_mle():
# Test small eigenvalues when 'mle' with pathological 'X' dataset
X, _ = datasets.make_classification(n_informative=1, n_repeated=18,
n_redundant=1, n_clusters_per_class=1,
random_state=42)
pca = PCA(n_components='mle').fit(X)
assert pca.n_components_ == 0
|
31,380 |
def add_private_content_to_index(private_index_path: str, extract_destination_path: str, index_folder_path: str,
pack_names: set) -> Tuple[Union[list, list], list]:
""" Adds a list of priced packs data-structures to the public index.json file.
Args:
private_index_path: path to where the private index is located.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): collection of pack names.
Returns:
list: priced packs from private bucket.
"""
private_packs = []
updated_private_packs = []
try:
logging.info("get_private_packs")
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
logging.info("get_updated_private_packs")
updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
logging.info("add_private_packs_to_index")
add_private_packs_to_index(index_folder_path, private_index_path)
except Exception as e:
logging.exception(f"Could not add private packs to the index. Additional Info: {str(e)}")
finally:
logging.info("Finished updating index with priced packs")
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, updated_private_packs
|
def add_private_content_to_index(private_index_path: str, extract_destination_path: str, index_folder_path: str,
pack_names: set) -> Tuple[Union[list, list], list]:
""" Adds a list of priced packs data-structures to the public index.json file. This step should not be skipped even if there are no new or updated private packs.
Args:
private_index_path: path to where the private index is located.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): collection of pack names.
Returns:
list: priced packs from private bucket.
"""
private_packs = []
updated_private_packs = []
try:
logging.info("get_private_packs")
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
logging.info("get_updated_private_packs")
updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
logging.info("add_private_packs_to_index")
add_private_packs_to_index(index_folder_path, private_index_path)
except Exception as e:
logging.exception(f"Could not add private packs to the index. Additional Info: {str(e)}")
finally:
logging.info("Finished updating index with priced packs")
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, updated_private_packs
|
25,589 |
def check_transaction_gas_used(transaction: TransactionMined) -> None:
""" Raise an exception if the transaction consumed all the gas. """
if was_transaction_successfully_mined(transaction):
return
receipt = transaction.receipt
gas_used = receipt["gasUsed"]
if gas_used >= transaction.startgas:
if isinstance(transaction.data, SmartContractCall):
smart_contract_function = transaction.data.function
# This error happened multiple times, it deserves a refresher on
# frequent reasons why it may happen:
msg = (
f"`{smart_contract_function}` failed and all gas was used "
f"({gas_used}). This can happen for a few reasons: "
f"1. The smart contract code may have an assert inside an if "
f"statement, at the time of gas estimation the condition was false, "
f"but another transaction changed the state of the smart contrat "
f"making the condition true. 2. The call to "
f"`{smart_contract_function}` executes an opcode with variable gas, "
f"at the time of gas estimation the cost was low, but another "
f"transaction changed the environment so that the new cost is high. "
f"This is particularly problematic storage is set to `0`, since the "
f"cost of a `SSTORE` increases 4 times. 3. The cost of the function "
f"varies with external state, if the cost increases because of "
f"another transaction the transaction can fail."
)
elif isinstance(transaction.data, ByteCode):
contract_name = transaction.data.contract_name
msg = f"Deploying {contract_name} failed because all the gas was used!"
else:
assert isinstance(transaction.data, EthTransfer)
msg = f"EthTransfer ailed!"
# Keeping this around just in case the wrong value from the receipt is
# used (Previously the `cumulativeGasUsed` was used, which was
# incorrect).
if gas_used > transaction.startgas:
msg = (
"The receipt `gasUsed` reported in the receipt is higher than the "
"transaction startgas!." + msg
)
raise RaidenUnrecoverableError(msg)
|
def check_transaction_gas_used(transaction: TransactionMined) -> None:
""" Raise an exception if the transaction consumed all the gas. """
if was_transaction_successfully_mined(transaction):
return
receipt = transaction.receipt
gas_used = receipt["gasUsed"]
if gas_used >= transaction.startgas:
if isinstance(transaction.data, SmartContractCall):
smart_contract_function = transaction.data.function
# This error happened multiple times, it deserves a refresher on
# frequent reasons why it may happen:
msg = (
f"`{smart_contract_function}` failed and all gas was used "
f"({gas_used}). This can happen for a few reasons: "
f"1. The smart contract code may have an assert inside an if "
f"statement, at the time of gas estimation the condition was false, "
f"but another transaction changed the state of the smart contrat "
f"making the condition true. 2. The call to "
f"`{smart_contract_function}` executes an opcode with variable gas, "
f"at the time of gas estimation the cost was low, but another "
f"transaction changed the environment so that the new cost is high. "
f"This is particularly problematic storage is set to `0`, since the "
f"cost of a `SSTORE` increases 4 times. 3. The cost of the function "
f"varies with external state, if the cost increases because of "
f"another transaction the transaction can fail."
)
elif isinstance(transaction.data, ByteCode):
contract_name = transaction.data.contract_name
msg = f"Deploying {contract_name} failed because all the gas was used!"
else:
assert isinstance(transaction.data, EthTransfer)
msg = f"EthTransfer failed!"
# Keeping this around just in case the wrong value from the receipt is
# used (Previously the `cumulativeGasUsed` was used, which was
# incorrect).
if gas_used > transaction.startgas:
msg = (
"The receipt `gasUsed` reported in the receipt is higher than the "
"transaction startgas!." + msg
)
raise RaidenUnrecoverableError(msg)
|
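A tiny illustration of the heuristic at the top of the function above (numbers are made up): a failed transaction whose `gasUsed` reaches its `startgas` allowance is treated as having run out of gas.
def ran_out_of_gas(receipt: dict, startgas: int) -> bool:
    # A failed transaction that consumed its whole allowance most likely hit out-of-gas.
    return receipt["gasUsed"] >= startgas
assert ran_out_of_gas({"gasUsed": 90000}, startgas=90000)
assert not ran_out_of_gas({"gasUsed": 52000}, startgas=90000)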
2,429 |
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
"""Check input_features and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Wether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for transformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out`, e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
"""Check input_features and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Whether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for transformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out`, e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
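A short usage sketch, assuming a scikit-learn version that exposes this private helper from `sklearn.utils.validation`: with an estimator fitted on a plain ndarray there is no `feature_names_in_`, so the generated `x0..x(n_features_in_ - 1)` names are returned.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.utils.validation import _check_feature_names_in  # private; location may vary by version
X = np.random.RandomState(0).rand(10, 3)
pca = PCA(n_components=2).fit(X)          # fitted on a plain ndarray: no feature_names_in_
print(_check_feature_names_in(pca, None)) # -> array(['x0', 'x1', 'x2'], dtype=object)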
51,992 |
def get_cmake_prefix_path(pkg):
# Note that unlike modifications_from_dependencies, this does not include
# any edits to CMAKE_PREFIX_PATH defined in custom
# setup_dependent_build_environment implementations of dependency packages
build_deps = set(pkg.spec.dependencies(deptype=("build", "test")))
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
build_link_deps = build_deps | link_deps
spack_built = []
externals = []
# modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
# prepending all externals and then all non-externals
for dspec in pkg.spec.traverse(root=False, order="post"):
if dspec in build_link_deps:
if dspec.external:
externals.insert(0, dspec)
else:
spack_built.insert(0, dspec)
ordered_build_link_deps = spack_built + externals
cmake_prefix_path_entries = []
for spec in ordered_build_link_deps:
cmake_prefix_path_entries.append(spec.prefix)
try:
cmake_prefix_path_entries.extend(spec.package.cmake_search_paths)
except AttributeError:
pass
return filter_system_paths(cmake_prefix_path_entries)
|
def get_cmake_prefix_path(pkg):
# Note that unlike modifications_from_dependencies, this does not include
# any edits to CMAKE_PREFIX_PATH defined in custom
# setup_dependent_build_environment implementations of dependency packages
build_deps = set(pkg.spec.dependencies(deptype=("build", "test")))
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
build_link_deps = build_deps | link_deps
spack_built = []
externals = []
# modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
# prepending all externals and then all non-externals
for dspec in pkg.spec.traverse(root=False, order="post"):
if dspec in build_link_deps:
if dspec.external:
externals.insert(0, dspec)
else:
spack_built.insert(0, dspec)
ordered_build_link_deps = spack_built + externals
cmake_prefix_path_entries = []
for spec in ordered_build_link_deps:
cmake_prefix_path_entries.append(spec.package.cmake_prefix_paths)
return filter_system_paths(cmake_prefix_path_entries)
|
29,754 |
def get_user_releases(user_id: int, stats_range: str) -> Optional[UserReleaseStat]:
"""Get top releases in a tine range for user with given ID.
Args:
user_id: the row ID of the user in the DB
stats_range: the time range to fetch the stats for
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT user_id, release->:range AS {range}, last_updated
FROM statistics.user
WHERE user_id = :user_id
""".format(range=stats_range)), {
'range': stats_range,
'user_id': user_id
})
row = result.fetchone()
return UserReleaseStat(**dict(row)) if row else None
|
def get_user_releases(user_id: int, stats_range: str) -> Optional[UserReleaseStat]:
"""Get top releases in a time range for user with given ID.
Args:
user_id: the row ID of the user in the DB
stats_range: the time range to fetch the stats for
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT user_id, release->:range AS {range}, last_updated
FROM statistics.user
WHERE user_id = :user_id
""".format(range=stats_range)), {
'range': stats_range,
'user_id': user_id
})
row = result.fetchone()
return UserReleaseStat(**dict(row)) if row else None
|
5,362 |
def deploy_windows(
host,
port=445,
timeout=900,
username="Administrator",
password=None,
name=None,
sock_dir=None,
conf_file=None,
start_action=None,
parallel=False,
minion_pub=None,
minion_pem=None,
minion_conf=None,
keep_tmp=False,
script_args=None,
script_env=None,
port_timeout=15,
preseed_minion_keys=None,
win_installer=None,
master=None,
tmp_dir="C:\\salttmp",
opts=None,
master_sign_pub_file=None,
use_winrm=False,
winrm_port=5986,
winrm_use_ssl=True,
winrm_verify_ssl=True,
**kwargs
):
"""
Copy the install files to a remote Windows box, and execute them
"""
if not isinstance(opts, dict):
opts = {}
if use_winrm and not HAS_WINRM:
log.error(
"WinRM requested but module winrm could not be imported."
"Ensure you are using version {} or higher.".format(WINRM_MIN_VER)
)
return False
starttime = time.mktime(time.localtime())
log.debug("Deploying %s at %s (Windows)", host, starttime)
log.trace("HAS_WINRM: %s, use_winrm: %s", HAS_WINRM, use_winrm)
port_available = wait_for_port(host=host, port=port, timeout=port_timeout * 60)
if not port_available:
return False
service_available = False
winrm_session = None
if HAS_WINRM and use_winrm:
winrm_session = wait_for_winrm(
host=host,
port=winrm_port,
username=username,
password=password,
timeout=port_timeout * 60,
use_ssl=winrm_use_ssl,
verify=winrm_verify_ssl,
)
if winrm_session is not None:
service_available = True
else:
service_available = wait_for_psexecsvc(
host=host,
port=port,
username=username,
password=password,
timeout=port_timeout * 60,
)
if port_available and service_available:
log.debug("SMB port %s on %s is available", port, host)
log.debug("Logging into %s:%s as %s", host, port, username)
smb_conn = salt.utils.smb.get_conn(host, username, password, port)
if smb_conn is False:
log.error("Please install smbprotocol to enable SMB functionality")
return False
salt.utils.smb.mkdirs("salttemp", conn=smb_conn)
salt.utils.smb.mkdirs("salt/conf/pki/minion", conn=smb_conn)
if minion_pub:
salt.utils.smb.put_str(
minion_pub, "salt\\conf\\pki\\minion\\minion.pub", conn=smb_conn
)
if minion_pem:
salt.utils.smb.put_str(
minion_pem, "salt\\conf\\pki\\minion\\minion.pem", conn=smb_conn
)
if master_sign_pub_file:
# Read master-sign.pub file
log.debug(
"Copying master_sign.pub file from %s to minion", master_sign_pub_file
)
try:
salt.utils.smb.put_file(
master_sign_pub_file,
"salt\\conf\\pki\\minion\\master_sign.pub",
"C$",
conn=smb_conn,
)
except Exception as e: # pylint: disable=broad-except
log.debug(
"Exception copying master_sign.pub file %s to minion",
master_sign_pub_file,
)
# Copy over win_installer
# win_installer refers to a file such as:
# /root/Salt-Minion-0.17.0-win32-Setup.exe
# ..which exists on the same machine as salt-cloud
comps = win_installer.split("/")
local_path = "/".join(comps[:-1])
installer = comps[-1]
salt.utils.smb.put_file(
win_installer, "salttemp\\{}".format(installer), "C$", conn=smb_conn,
)
if use_winrm:
winrm_cmd(
winrm_session,
"c:\\salttemp\\{}".format(installer),
["/S", "/master={}".format(master), "/minion-name={}".format(name)],
)
else:
cmd = "c:\\salttemp\\{}".format(installer)
args = "/S /master={} /minion-name={}".format(master, name)
stdout, stderr, ret_code = run_psexec_command(
cmd, args, host, username, password
)
if ret_code != 0:
raise Exception("Fail installer {}".format(ret_code))
# Copy over minion_conf
if minion_conf:
if not isinstance(minion_conf, dict):
# Let's not just fail regarding this change, especially
# since we can handle it
raise DeprecationWarning(
"`salt.utils.cloud.deploy_windows` now only accepts "
"dictionaries for its `minion_conf` parameter. "
"Loading YAML..."
)
minion_grains = minion_conf.pop("grains", {})
if minion_grains:
salt.utils.smb.put_str(
salt_config_to_yaml(minion_grains, line_break="\r\n"),
"salt\\conf\\grains",
conn=smb_conn,
)
# Add special windows minion configuration
# that must be in the minion config file
windows_minion_conf = {
"ipc_mode": "tcp",
"root_dir": "c:\\salt",
"pki_dir": "/conf/pki/minion",
"multiprocessing": False,
}
minion_conf = dict(minion_conf, **windows_minion_conf)
salt.utils.smb.put_str(
salt_config_to_yaml(minion_conf, line_break="\r\n"),
"salt\\conf\\minion",
conn=smb_conn,
)
# Delete C:\salttmp\ and installer file
# Unless keep_tmp is True
if not keep_tmp:
if use_winrm:
winrm_cmd(winrm_session, "rmdir", ["/Q", "/S", "C:\\salttemp\\"])
else:
salt.utils.smb.delete_file(
"salttemp\\{}".format(installer), "C$", conn=smb_conn
)
salt.utils.smb.delete_directory("salttemp", "C$", conn=smb_conn)
# Shell out to psexec to ensure salt-minion service started
if use_winrm:
winrm_cmd(winrm_session, "sc", ["stop", "salt-minion"])
time.sleep(5)
winrm_cmd(winrm_session, "sc", ["start", "salt-minion"])
else:
stdout, stderr, ret_code = run_psexec_command(
"cmd.exe", "/c sc stop salt-minion", host, username, password
)
if ret_code != 0:
return False
time.sleep(5)
log.debug("Run psexec: sc start salt-minion")
stdout, stderr, ret_code = run_psexec_command(
"cmd.exe", "/c sc start salt-minion", host, username, password
)
if ret_code != 0:
return False
# Fire deploy action
fire_event(
"event",
"{} has been deployed at {}".format(name, host),
"salt/cloud/{}/deploy_windows".format(name),
args={"name": name},
sock_dir=opts.get("sock_dir", os.path.join(__opts__["sock_dir"], "master")),
transport=opts.get("transport", "zeromq"),
)
return True
return False
|
def deploy_windows(
host,
port=445,
timeout=900,
username="Administrator",
password=None,
name=None,
sock_dir=None,
conf_file=None,
start_action=None,
parallel=False,
minion_pub=None,
minion_pem=None,
minion_conf=None,
keep_tmp=False,
script_args=None,
script_env=None,
port_timeout=15,
preseed_minion_keys=None,
win_installer=None,
master=None,
tmp_dir="C:\\salttmp",
opts=None,
master_sign_pub_file=None,
use_winrm=False,
winrm_port=5986,
winrm_use_ssl=True,
winrm_verify_ssl=True,
**kwargs
):
"""
Copy the install files to a remote Windows box, and execute them
"""
if not isinstance(opts, dict):
opts = {}
if use_winrm and not HAS_WINRM:
log.error(
"WinRM requested but module winrm could not be imported. "
"Ensure you are using version {} or higher.".format(WINRM_MIN_VER)
)
return False
starttime = time.mktime(time.localtime())
log.debug("Deploying %s at %s (Windows)", host, starttime)
log.trace("HAS_WINRM: %s, use_winrm: %s", HAS_WINRM, use_winrm)
port_available = wait_for_port(host=host, port=port, timeout=port_timeout * 60)
if not port_available:
return False
service_available = False
winrm_session = None
if HAS_WINRM and use_winrm:
winrm_session = wait_for_winrm(
host=host,
port=winrm_port,
username=username,
password=password,
timeout=port_timeout * 60,
use_ssl=winrm_use_ssl,
verify=winrm_verify_ssl,
)
if winrm_session is not None:
service_available = True
else:
service_available = wait_for_psexecsvc(
host=host,
port=port,
username=username,
password=password,
timeout=port_timeout * 60,
)
if port_available and service_available:
log.debug("SMB port %s on %s is available", port, host)
log.debug("Logging into %s:%s as %s", host, port, username)
smb_conn = salt.utils.smb.get_conn(host, username, password, port)
if smb_conn is False:
log.error("Please install smbprotocol to enable SMB functionality")
return False
salt.utils.smb.mkdirs("salttemp", conn=smb_conn)
salt.utils.smb.mkdirs("salt/conf/pki/minion", conn=smb_conn)
if minion_pub:
salt.utils.smb.put_str(
minion_pub, "salt\\conf\\pki\\minion\\minion.pub", conn=smb_conn
)
if minion_pem:
salt.utils.smb.put_str(
minion_pem, "salt\\conf\\pki\\minion\\minion.pem", conn=smb_conn
)
if master_sign_pub_file:
# Read master-sign.pub file
log.debug(
"Copying master_sign.pub file from %s to minion", master_sign_pub_file
)
try:
salt.utils.smb.put_file(
master_sign_pub_file,
"salt\\conf\\pki\\minion\\master_sign.pub",
"C$",
conn=smb_conn,
)
except Exception as e: # pylint: disable=broad-except
log.debug(
"Exception copying master_sign.pub file %s to minion",
master_sign_pub_file,
)
# Copy over win_installer
# win_installer refers to a file such as:
# /root/Salt-Minion-0.17.0-win32-Setup.exe
# ..which exists on the same machine as salt-cloud
comps = win_installer.split("/")
local_path = "/".join(comps[:-1])
installer = comps[-1]
salt.utils.smb.put_file(
win_installer, "salttemp\\{}".format(installer), "C$", conn=smb_conn,
)
if use_winrm:
winrm_cmd(
winrm_session,
"c:\\salttemp\\{}".format(installer),
["/S", "/master={}".format(master), "/minion-name={}".format(name)],
)
else:
cmd = "c:\\salttemp\\{}".format(installer)
args = "/S /master={} /minion-name={}".format(master, name)
stdout, stderr, ret_code = run_psexec_command(
cmd, args, host, username, password
)
if ret_code != 0:
raise Exception("Fail installer {}".format(ret_code))
# Copy over minion_conf
if minion_conf:
if not isinstance(minion_conf, dict):
# Let's not just fail regarding this change, especially
# since we can handle it
raise DeprecationWarning(
"`salt.utils.cloud.deploy_windows` now only accepts "
"dictionaries for its `minion_conf` parameter. "
"Loading YAML..."
)
minion_grains = minion_conf.pop("grains", {})
if minion_grains:
salt.utils.smb.put_str(
salt_config_to_yaml(minion_grains, line_break="\r\n"),
"salt\\conf\\grains",
conn=smb_conn,
)
# Add special windows minion configuration
# that must be in the minion config file
windows_minion_conf = {
"ipc_mode": "tcp",
"root_dir": "c:\\salt",
"pki_dir": "/conf/pki/minion",
"multiprocessing": False,
}
minion_conf = dict(minion_conf, **windows_minion_conf)
salt.utils.smb.put_str(
salt_config_to_yaml(minion_conf, line_break="\r\n"),
"salt\\conf\\minion",
conn=smb_conn,
)
# Delete C:\salttmp\ and installer file
# Unless keep_tmp is True
if not keep_tmp:
if use_winrm:
winrm_cmd(winrm_session, "rmdir", ["/Q", "/S", "C:\\salttemp\\"])
else:
salt.utils.smb.delete_file(
"salttemp\\{}".format(installer), "C$", conn=smb_conn
)
salt.utils.smb.delete_directory("salttemp", "C$", conn=smb_conn)
# Shell out to psexec to ensure salt-minion service started
if use_winrm:
winrm_cmd(winrm_session, "sc", ["stop", "salt-minion"])
time.sleep(5)
winrm_cmd(winrm_session, "sc", ["start", "salt-minion"])
else:
stdout, stderr, ret_code = run_psexec_command(
"cmd.exe", "/c sc stop salt-minion", host, username, password
)
if ret_code != 0:
return False
time.sleep(5)
log.debug("Run psexec: sc start salt-minion")
stdout, stderr, ret_code = run_psexec_command(
"cmd.exe", "/c sc start salt-minion", host, username, password
)
if ret_code != 0:
return False
# Fire deploy action
fire_event(
"event",
"{} has been deployed at {}".format(name, host),
"salt/cloud/{}/deploy_windows".format(name),
args={"name": name},
sock_dir=opts.get("sock_dir", os.path.join(__opts__["sock_dir"], "master")),
transport=opts.get("transport", "zeromq"),
)
return True
return False
|
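A rough sketch of producing the Windows-style minion config text that is pushed over SMB above, using plain PyYAML instead of Salt's own `salt_config_to_yaml` helper, so it is an approximation of the output rather than the exact format.
import yaml  # PyYAML stands in for salt_config_to_yaml here
windows_minion_conf = {
    "ipc_mode": "tcp",
    "root_dir": "c:\\salt",
    "pki_dir": "/conf/pki/minion",
    "multiprocessing": False,
}
text = yaml.safe_dump(windows_minion_conf, default_flow_style=False)
text = text.replace("\n", "\r\n")   # the deploy code asks for Windows line breaks
print(repr(text))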
47,934 |
def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path, file_idx)
pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_')
formula = preprocess_formula(formula)
if not output_path.exists():
tex_filename = Path(folder_path, pre_name + '.tex')
log_filename = tex_filename.with_name(pre_name + '.log')
aux_filename = tex_filename.with_name(pre_name + '.aux')
with open(str(tex_filename), "w") as w:
w.write(template % formula)
subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)],
check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
for filename in (tex_filename, log_filename, aux_filename):
if filename.exists():
filename.unlink()
pdf_filename = tex_filename.with_name(pre_name + '.pdf')
png_filename = tex_filename.with_name(pre_name + '.png')
if not pdf_filename.exists():
print_info('ERROR: {} cannot compile\n'.format(file_idx))
else:
subprocess.run(['convert', '+profile', '"icc"', '-density', '200', '-quality', '100',
str(pdf_filename), str(png_filename)],
check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
if pdf_filename.exists():
pdf_filename.unlink()
if png_filename.exists():
crop_image(str(png_filename), str(output_path))
png_filename.unlink()
else:
print_info("ERROR: {png_filename} does not exists".format(png_filename=png_filename))
|
def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path) / file_idx
pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_')
formula = preprocess_formula(formula)
if not output_path.exists():
tex_filename = Path(folder_path, pre_name + '.tex')
log_filename = tex_filename.with_name(pre_name + '.log')
aux_filename = tex_filename.with_name(pre_name + '.aux')
with open(str(tex_filename), "w") as w:
w.write(template % formula)
subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)],
check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
for filename in (tex_filename, log_filename, aux_filename):
if filename.exists():
filename.unlink()
pdf_filename = tex_filename.with_name(pre_name + '.pdf')
png_filename = tex_filename.with_name(pre_name + '.png')
if not pdf_filename.exists():
print_info('ERROR: {} cannot compile\n'.format(file_idx))
else:
subprocess.run(['convert', '+profile', '"icc"', '-density', '200', '-quality', '100',
str(pdf_filename), str(png_filename)],
check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
if pdf_filename.exists():
pdf_filename.unlink()
if png_filename.exists():
crop_image(str(png_filename), str(output_path))
png_filename.unlink()
else:
print_info("ERROR: {png_filename} does not exists".format(png_filename=png_filename))
|
17,956 |
def get_plugins():
""" Return a dict of all installed Plugins as {name: EntryPoint}. """
plugins = importlib_metadata.entry_points(group='mkdocs.plugins')
# Allow third-party plugins to override core plugins
pluginmap = {}
for plugin in plugins:
if plugin.name in pluginmap and "mkdocs.contrib" in plugin.value:
continue
pluginmap[plugin.name] = plugin
return pluginmap
|
def get_plugins():
""" Return a dict of all installed Plugins as {name: EntryPoint}. """
plugins = importlib_metadata.entry_points(group='mkdocs.plugins')
# Allow third-party plugins to override core plugins
pluginmap = {}
for plugin in plugins:
if plugin.name in pluginmap and plugin.value.startswith("mkdocs.contrib."):
continue
pluginmap[plugin.name] = plugin
return pluginmap
|
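A small illustration of why the fixed version matches on a prefix instead of a substring (the entry-point values are hypothetical): with the substring test, a third-party entry point whose value merely contains the text `mkdocs.contrib` would be mistaken for a core plugin and skipped.
core = "mkdocs.contrib.search:SearchPlugin"
third_party = "not_mkdocs.contrib.search:SearchPlugin"
# Substring test wrongly treats the third-party entry point as a core plugin.
assert "mkdocs.contrib" in third_party
# Prefix test only matches the real core namespace.
assert core.startswith("mkdocs.contrib.")
assert not third_party.startswith("mkdocs.contrib.")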
10,893 |
def show(package):
try:
sys.stdout.flush()
output = subprocess.check_output(
[sys.executable] + ['-m', 'pip', 'show', package],
)
for line in output.splitlines():
if line.startswith(b'Name') or line.startswith(b'Version'):
print(line.decode('utf8'))
except subprocess.CalledProcessError:
raise RuntimeError("Upgrade %s failed." % package)
|
def show(package):
try:
sys.stdout.flush()
output = subprocess.check_output(
[sys.executable, '-m', 'pip', 'show', package],
)
for line in output.splitlines():
if line.startswith(b'Name') or line.startswith(b'Version'):
print(line.decode('utf8'))
except subprocess.CalledProcessError:
raise RuntimeError("Upgrade %s failed." % package)
|
30,729 |
def search_group_members(default_base_dn, page_size):
# this command is equivalent to ADGetGroupMembers script
args = demisto.args()
member_type = args.get('member-type')
group_dn = args.get('group-dn')
time_limit = int(args.get('time_limit', 30))
custome_attributes: List[str] = []
default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = list(set(custome_attributes + default_attributes))
# nested search
query = "(&(objectCategory={})(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:={}))".format(member_type,
group_dn)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size,
time_limit=time_limit
)
members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
'EntryContext': {
'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
'dn': group_dn,
'members': members
}
}
}
if member_type == 'person':
demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Account'] = [account_entry(
entry, custome_attributes) for entry in entries['flat']]
else:
demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
entry, custome_attributes) for entry in entries['flat']]
demisto.results(demisto_entry)
|
def search_group_members(default_base_dn, page_size):
# this command is equivalent to ADGetGroupMembers script
args = demisto.args()
member_type = args.get('member-type')
group_dn = args.get('group-dn')
time_limit = int(args.get('time_limit', 180))
custome_attributes: List[str] = []
default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = list(set(custome_attributes + default_attributes))
# nested search
query = "(&(objectCategory={})(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:={}))".format(member_type,
group_dn)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size,
time_limit=time_limit
)
members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
'EntryContext': {
'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
'dn': group_dn,
'members': members
}
}
}
if member_type == 'person':
demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Account'] = [account_entry(
entry, custome_attributes) for entry in entries['flat']]
else:
demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
entry, custome_attributes) for entry in entries['flat']]
demisto.results(demisto_entry)
|
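For clarity, the nested-membership filter built above relies on the LDAP_MATCHING_RULE_IN_CHAIN OID (1.2.840.113556.1.4.1941); a standalone sketch of composing it, with hypothetical values:
member_type = "person"
group_dn = "CN=Admins,OU=Groups,DC=example,DC=com"
# Matches users that are direct or transitive members of the group.
query = "(&(objectCategory={})(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:={}))".format(
    member_type, group_dn
)
print(query)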
20,055 |
def get_datasets(
dataset_ids: List[Union[str, int]], download_data: bool = True, download_qualities: bool = True
) -> List[OpenMLDataset]:
"""Download datasets.
This function iterates :meth:`openml.datasets.get_dataset`.
Parameters
----------
dataset_ids : iterable
Integers or strings representing dataset ids or dataset names.
If dataset names are specified, the least recent still active dataset version is returned.
download_data : bool, optional
If True, also download the data file. Beware that some datasets are large and it might
make the operation noticeably slower. Metadata is also still retrieved.
If False, create the OpenMLDataset and only populate it with the metadata.
The data may later be retrieved through the `OpenMLDataset.get_data` method.
download_qualities : bool, optional
If True, also download qualities.xml file. If false use the file if it was cached.
Returns
-------
datasets : list of datasets
A list of dataset objects.
"""
datasets = []
for dataset_id in dataset_ids:
datasets.append(get_dataset(dataset_id, download_data, download_qualities))
return datasets
|
def get_datasets(
dataset_ids: List[Union[str, int]], download_data: bool = True, download_qualities: bool = True
) -> List[OpenMLDataset]:
"""Download datasets.
This function iterates :meth:`openml.datasets.get_dataset`.
Parameters
----------
dataset_ids : iterable
Integers or strings representing dataset ids or dataset names.
If dataset names are specified, the least recent still active dataset version is returned.
download_data : bool, optional
If True, also download the data file. Beware that some datasets are large and it might
make the operation noticeably slower. Metadata is also still retrieved.
If False, create the OpenMLDataset and only populate it with the metadata.
The data may later be retrieved through the `OpenMLDataset.get_data` method.
download_qualities : bool, optional (default=True)
If True, also download qualities.xml file. If false use the file if it was cached.
Returns
-------
datasets : list of datasets
A list of dataset objects.
"""
datasets = []
for dataset_id in dataset_ids:
datasets.append(get_dataset(dataset_id, download_data, download_qualities))
return datasets
|
53,918 |
def main():
module = AnsibleModule(argument_spec=dict(
asic_instance_id = dict(required = False, type='int', default=None),
skip_interface_pattern_list = dict(required = False, type='list', default=None)
),
supports_check_mode=False)
m_args = module.params
lldpctl_docker_cmd = 'docker exec -i {} lldpctl -f keyvalue'.format('lldp' + (str(m_args['asic_instance_id']) if m_args['asic_instance_id'] else ''))
lldp_output = gather_lldp(module, lldpctl_docker_cmd, m_args['skip_interface_pattern_list'])
try:
data = {'lldpctl': lldp_output['lldp']}
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed")
|
def main():
module = AnsibleModule(argument_spec=dict(
asic_instance_id = dict(required = False, type='int', default=None),
skip_interface_pattern_list = dict(required = False, type='list', default=None)
),
supports_check_mode=False)
m_args = module.params
lldpctl_docker_cmd = "docker exec -i {} lldpctl -f keyvalue".format("lldp" + (str(m_args["asic_instance_id"]) if m_args["asic_instance_id"] else ""))
lldp_output = gather_lldp(module, lldpctl_docker_cmd, m_args["skip_interface_pattern_list"])
try:
data = {"lldpctl": lldp_output["lldp"]}
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed")
|
8,821 |
def test_register_url_callback_manual_warning(tmpconfig, caplog):
"""Test that manually registering a callback produces a deprecation warning"""
test_pattern = r'https://(www\.)?example\.com'
def url_handler(*args, **kwargs):
return None
sopel = bot.Sopel(tmpconfig, daemon=False)
sopel.memory["url_callbacks"] = SopelMemory()
# register a callback manually
sopel.memory["url_callbacks"][re.compile(test_pattern)] = url_handler
results = list(sopel.search_url_callbacks("https://www.example.com"))
assert results[0][0] == url_handler, "Callback must be present"
for record in caplog.records:
if "url_callbacks" in record.text and "deprecated" in record.text:
# success
return
raise Exception("No deprecation warning was found")
|
def test_register_url_callback_manual_warning(tmpconfig, caplog):
"""Test that manually registering a callback produces a deprecation warning"""
test_pattern = r'https://(www\.)?example\.com'
def url_handler(*args, **kwargs):
return None
sopel = bot.Sopel(tmpconfig, daemon=False)
sopel.memory["url_callbacks"] = SopelMemory()
# register a callback manually
sopel.memory["url_callbacks"][re.compile(test_pattern)] = url_handler
results = list(sopel.search_url_callbacks("https://www.example.com"))
assert results[0][0] == url_handler, "Callback must be present"
for record in caplog.records:
if "url_callbacks" in record.message and "deprecated" in record.message:
# success
return
raise Exception("No deprecation warning was found")
|
54,089 |
def get_service_data(service: Optional[Union[str, io.IOBase]]) -> Dict[str, Any]:
service = service or os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if not service:
cloudsdk_config = os.environ.get('CLOUDSDK_CONFIG')
sdkpath = (cloudsdk_config
or os.path.join(os.path.expanduser('~'), '.config',
'gcloud'))
service = os.path.join(sdkpath, 'application_default_credentials.json')
set_explicitly = bool(cloudsdk_config)
else:
set_explicitly = True
try:
with open(service, 'r') as f:
data: Dict[str, Any] = json.loads(f.read())
return data
except FileNotFoundError:
if set_explicitly:
# only warn users if they have explicitly set the service_file path
raise
return {}
except TypeError:
data: Dict[str, Any] = json.loads(service.read())
return data
except Exception: # pylint: disable=broad-except
return {}
|
def get_service_data(service: Optional[Union[str, io.IOBase]]) -> Dict[str, Any]:
service = service or os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if not service:
cloudsdk_config = os.environ.get('CLOUDSDK_CONFIG')
sdkpath = (cloudsdk_config
or os.path.join(os.path.expanduser('~'), '.config',
'gcloud'))
service = os.path.join(sdkpath, 'application_default_credentials.json')
set_explicitly = bool(cloudsdk_config)
else:
set_explicitly = True
try:
with open(service, 'r') as f:
data: Dict[str, Any] = json.loads(f.read())
return data
except FileNotFoundError:
if set_explicitly:
# only warn users if they have explicitly set the service_file path
raise
return {}
except TypeError:
try:
data: Dict[str, Any] = json.loads(service.read())
return data
except Exception: # pylint: disable=broad-except
return {}
return data
except Exception: # pylint: disable=broad-except
return {}
|
42,043 |
def test_plot_slice() -> None:
# Test with no trial.
study = prepare_study_with_trials(no_trials=True)
figure = plot_slice(study)
assert len(figure.findobj(PathCollection)) == 0
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_slice(study)
assert len(figure) == 2
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Objective Value"
# Scatter plot data is available as PathCollection
data0 = figure[0].findobj(PathCollection)[0].get_offsets().data
data1 = figure[1].findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
assert np.allclose(data1, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
# Test with a trial to select parameter.
figure = plot_slice(study, params=["param_a"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target name.
figure = plot_slice(study, target_name="Target Name")
assert len(figure) == 2
assert len(figure[0].get_lines()) == 0
assert len(figure[1].get_lines()) == 0
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_slice(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_slice(study)
assert len(figure.get_lines()) == 0
assert len(figure.findobj(PathCollection)) == 0
|
def test_plot_slice() -> None:
# Test with no trial.
study = prepare_study_with_trials(no_trials=True)
figure = plot_slice(study)
assert len(figure.findobj(PathCollection)) == 0
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_slice(study)
assert len(figure) == 2
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Objective Value"
# Scatter plot data is available as PathCollection
data0 = figure[0].findobj(PathCollection)[0].get_offsets().data
data1 = figure[1].findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
assert np.allclose(data1, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
# Test with a trial to select parameter.
figure = plot_slice(study, params=["param_a"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target name.
figure = plot_slice(study, target_name="Target Name")
assert len(figure) == 2
assert len(figure[0].get_lines()) == 0
assert len(figure[1].get_lines()) == 0
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_slice(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_slice(study)
assert len(figure.get_lines()) == 0
assert len(figure.findobj(PathCollection)) == 0
|
2,214 |
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
with pytest.error(ValueError):
theil_sen.fit(X, y)
|
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
|
4,524 |
def main(*args):
"""Contains flow control"""
options, parser = parse_options(args)
if options.regex and options.write_changes:
print("ERROR: --write-changes cannot be used together with "
"--regex", file=sys.stderr)
parser.print_help()
return EX_USAGE
word_regex = options.regex or word_regex_def
try:
word_regex = re.compile(word_regex)
except re.error as err:
print("ERROR: invalid --regex \"%s\" (%s)" %
(word_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
if options.ignore_regex:
try:
ignore_word_regex = re.compile(options.ignore_regex)
except re.error as err:
print("ERROR: invalid --ignore-regex \"%s\" (%s)" %
(options.ignore_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
else:
ignore_word_regex = None
ignore_words_files = options.ignore_words or []
ignore_words = parse_ignore_words_option(options.ignore_words_list)
for ignore_words_file in ignore_words_files:
if not os.path.isfile(ignore_words_file):
print("ERROR: cannot find ignore-words file: %s" %
ignore_words_file, file=sys.stderr)
parser.print_help()
return EX_USAGE
build_ignore_words(ignore_words_file, ignore_words)
uri_regex = options.uri_regex or uri_regex_def
try:
uri_regex = re.compile(uri_regex)
except re.error as err:
print("ERROR: invalid --uri-regex \"%s\" (%s)" %
(uri_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
uri_ignore_words = parse_ignore_words_option(options.uri_ignore_words_list)
if options.dictionary:
dictionaries = options.dictionary
else:
dictionaries = ['-']
use_dictionaries = list()
for dictionary in dictionaries:
if dictionary == "-":
# figure out which builtin dictionaries to use
use = sorted(set(options.builtin.split(',')))
for u in use:
for builtin in _builtin_dictionaries:
if builtin[0] == u:
use_dictionaries.append(
os.path.join(_data_root, 'dictionary%s.txt'
% (builtin[2],)))
break
else:
print("ERROR: Unknown builtin dictionary: %s" % (u,),
file=sys.stderr)
parser.print_help()
return EX_USAGE
else:
if not os.path.isfile(dictionary):
print("ERROR: cannot find dictionary file: %s" % dictionary,
file=sys.stderr)
parser.print_help()
return EX_USAGE
use_dictionaries.append(dictionary)
misspellings = dict()
for dictionary in use_dictionaries:
build_dict(dictionary, misspellings, ignore_words)
colors = TermColors()
if not options.colors or sys.platform == 'win32':
colors.disable()
if options.summary:
summary = Summary()
else:
summary = None
context = None
if options.context is not None:
if (options.before_context is not None) or \
(options.after_context is not None):
print("ERROR: --context/-C cannot be used together with "
"--context-before/-B or --context-after/-A",
file=sys.stderr)
parser.print_help()
return EX_USAGE
context_both = max(0, options.context)
context = (context_both, context_both)
elif (options.before_context is not None) or \
(options.after_context is not None):
context_before = 0
context_after = 0
if options.before_context is not None:
context_before = max(0, options.before_context)
if options.after_context is not None:
context_after = max(0, options.after_context)
context = (context_before, context_after)
exclude_lines = set()
if options.exclude_file:
build_exclude_hashes(options.exclude_file, exclude_lines)
file_opener = FileOpener(options.hard_encoding_detection,
options.quiet_level)
glob_match = GlobMatch(options.skip)
try:
glob_match.match("/random/path") # does not need a real path
except re.error:
print("ERROR: --skip/-S has been fed an invalid glob",
file=sys.stderr)
return EX_USAGE
bad_count = 0
for filename in options.files:
# ignore hidden files
if is_hidden(filename, options.check_hidden):
continue
if os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
if glob_match.match(root): # skip (absolute) directories
del dirs[:]
continue
if is_hidden(root, options.check_hidden): # dir itself hidden
continue
for file_ in files:
# ignore hidden files in directories
if is_hidden(file_, options.check_hidden):
continue
if glob_match.match(file_): # skip files
continue
fname = os.path.join(root, file_)
if glob_match.match(fname): # skip paths
continue
bad_count += parse_file(
fname, colors, summary, misspellings, exclude_lines,
file_opener, word_regex, ignore_word_regex, uri_regex,
uri_ignore_words, context, options)
# skip (relative) directories
dirs[:] = [dir_ for dir_ in dirs if not glob_match.match(dir_)]
elif not glob_match.match(filename): # skip files
bad_count += parse_file(
filename, colors, summary, misspellings, exclude_lines,
file_opener, word_regex, ignore_word_regex, uri_regex,
uri_ignore_words, context, options)
if summary:
print("\n-------8<-------\nSUMMARY:")
print(summary)
if options.count:
print(bad_count, file=sys.stderr)
return EX_DATAERR if bad_count else EX_OK
|
def main(*args):
"""Contains flow control"""
options, parser = parse_options(args)
if options.regex and options.write_changes:
print("ERROR: --write-changes cannot be used together with "
"--regex", file=sys.stderr)
parser.print_help()
return EX_USAGE
word_regex = options.regex or word_regex_def
try:
word_regex = re.compile(word_regex)
except re.error as err:
print("ERROR: invalid --regex \"%s\" (%s)" %
(word_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
if options.ignore_regex:
try:
ignore_word_regex = re.compile(options.ignore_regex)
except re.error as err:
print("ERROR: invalid --ignore-regex \"%s\" (%s)" %
(options.ignore_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
else:
ignore_word_regex = None
ignore_words_files = options.ignore_words or []
ignore_words = parse_ignore_words_option(options.ignore_words_list)
for ignore_words_file in ignore_words_files:
if not os.path.isfile(ignore_words_file):
print("ERROR: cannot find ignore-words file: %s" %
ignore_words_file, file=sys.stderr)
parser.print_help()
return EX_USAGE
build_ignore_words(ignore_words_file, ignore_words)
uri_regex = options.uri_regex or uri_regex_def
try:
uri_regex = re.compile(uri_regex)
except re.error as err:
print("ERROR: invalid --uri-regex \"%s\" (%s)" %
(uri_regex, err), file=sys.stderr)
parser.print_help()
return EX_USAGE
uri_ignore_words = parse_ignore_words_option(options.uri_ignore_words_list)
if options.dictionary:
dictionaries = options.dictionary
else:
dictionaries = ['-']
use_dictionaries = list()
for dictionary in dictionaries:
if dictionary == "-":
# figure out which builtin dictionaries to use
use = sorted(set(options.builtin.split(',')))
for u in use:
for builtin in _builtin_dictionaries:
if builtin[0] == u:
use_dictionaries.append(
os.path.join(_data_root, 'dictionary%s.txt'
% (builtin[2],)))
break
else:
print("ERROR: Unknown builtin dictionary: %s" % (u,),
file=sys.stderr)
parser.print_help()
return EX_USAGE
else:
if not os.path.isfile(dictionary):
print("ERROR: cannot find dictionary file: %s" % dictionary,
file=sys.stderr)
parser.print_help()
return EX_USAGE
use_dictionaries.append(dictionary)
misspellings = dict()
for dictionary in use_dictionaries:
build_dict(dictionary, misspellings, ignore_words)
colors = TermColors()
if not options.colors or sys.platform == 'win32':
colors.disable()
if options.summary:
summary = Summary()
else:
summary = None
context = None
if options.context is not None:
if (options.before_context is not None) or \
(options.after_context is not None):
print("ERROR: --context/-C cannot be used together with "
"--context-before/-B or --context-after/-A",
file=sys.stderr)
parser.print_help()
return EX_USAGE
context_both = max(0, options.context)
context = (context_both, context_both)
elif (options.before_context is not None) or \
(options.after_context is not None):
context_before = 0
context_after = 0
if options.before_context is not None:
context_before = max(0, options.before_context)
if options.after_context is not None:
context_after = max(0, options.after_context)
context = (context_before, context_after)
exclude_lines = set()
if options.exclude_file:
build_exclude_hashes(options.exclude_file, exclude_lines)
file_opener = FileOpener(options.hard_encoding_detection,
options.quiet_level)
glob_match = GlobMatch(options.skip)
try:
glob_match.match("/random/path") # does not need a real path
except re.error:
print("ERROR: --skip/-S has been fed an invalid glob, try escaping special characters",
file=sys.stderr)
return EX_USAGE
bad_count = 0
for filename in options.files:
# ignore hidden files
if is_hidden(filename, options.check_hidden):
continue
if os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
if glob_match.match(root): # skip (absolute) directories
del dirs[:]
continue
if is_hidden(root, options.check_hidden): # dir itself hidden
continue
for file_ in files:
# ignore hidden files in directories
if is_hidden(file_, options.check_hidden):
continue
if glob_match.match(file_): # skip files
continue
fname = os.path.join(root, file_)
if glob_match.match(fname): # skip paths
continue
bad_count += parse_file(
fname, colors, summary, misspellings, exclude_lines,
file_opener, word_regex, ignore_word_regex, uri_regex,
uri_ignore_words, context, options)
# skip (relative) directories
dirs[:] = [dir_ for dir_ in dirs if not glob_match.match(dir_)]
elif not glob_match.match(filename): # skip files
bad_count += parse_file(
filename, colors, summary, misspellings, exclude_lines,
file_opener, word_regex, ignore_word_regex, uri_regex,
uri_ignore_words, context, options)
if summary:
print("\n-------8<-------\nSUMMARY:")
print(summary)
if options.count:
print(bad_count, file=sys.stderr)
return EX_DATAERR if bad_count else EX_OK
|
30,106 |
def test_sig_fileinfo_does_not_exit(runtmp):
# test on file that does not exist
with pytest.raises(SourmashCommandFailed):
runtmp.run_sourmash('sig', 'fileinfo', 'does-not-exist')
assert "Cannot open 'does-not-exist'." in runtmp.last_result.err
|
def test_sig_fileinfo_does_not_exist(runtmp):
# test on file that does not exist
with pytest.raises(SourmashCommandFailed):
runtmp.run_sourmash('sig', 'fileinfo', 'does-not-exist')
assert "Cannot open 'does-not-exist'." in runtmp.last_result.err
|
39,817 |
def multidimensional_deconfliction(association_set):
"""Solves the Multidimensional Assignment Problem (MAP)
The assignment problem becomes more complex when time is added as a dimension.
This basic solution finds all the conflicts in an association set and then creates a
matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a
simple 2D assignment problem. Therefore, each object will only ever be assigned to one other
at any one time. In the case of an association that only partially overlaps, the time range
of the "weaker" one (the one eliminated by assign2D) will be trimmed
until there is no conflict.
Due to the possibility of more than two conflicting associations at the same time,
this algorithm is recursive, but it is not expected many (if any) recursions will be required
for most uses.
Parameters
----------
association_set: The :class:`AssociationSet` to de-conflict
Returns
-------
: :class:`AssociationSet`
The association set without contradictory associations
"""
# Check if there are any conflicts
no_conflicts = True
for assoc1 in association_set:
for assoc2 in association_set:
if conflicts(assoc1, assoc2):
no_conflicts = False
if no_conflicts:
return association_set
objects = list(association_set.object_set)
length = len(objects)
totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total
for association in association_set.associations:
if len(association.objects) != 2:
raise ValueError("Supplied set must only contain pairs of associated objects")
obj_indices = [objects.index(list(association.objects)[0]),
objects.index(list(association.objects)[1])]
totals[obj_indices[0], obj_indices[1]] = association.time_range.duration.total_seconds()
make_symmetric(totals)
totals = numpy.rint(totals).astype(int)
numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself
solved_2d = assign2D(totals, maximize=True)[1]
winning_indices = [] # Pairs that are chosen by assign2D
for i in range(length):
if i != solved_2d[i]:
winning_indices.append([i, solved_2d[i]])
cleaned_set = AssociationSet()
if len(winning_indices) == 0:
raise ValueError("Problem unsolvable using this method")
for winner in winning_indices:
assoc = association_set.associations_including_objects({objects[winner[0]],
objects[winner[1]]})
cleaned_set.add(assoc)
association_set.remove(assoc)
# Recursive step
runners_up = set()
for assoc1 in association_set.associations:
for assoc2 in association_set.associations:
if conflicts(assoc1, assoc2):
runners_up = multidimensional_deconfliction(association_set).associations
# At this point, none of association_set should conflict with one another
for runner_up in runners_up:
for winner in cleaned_set:
if conflicts(runner_up, winner):
runner_up.time_range.minus(winner.time_range)
if runner_up.time_range is not None:
cleaned_set.add(runner_up)
else:
runners_up.remove(runner_up)
return cleaned_set
|
def multidimensional_deconfliction(association_set):
"""Solves the Multidimensional Assignment Problem (MAP)
The assignment problem becomes more complex when time is added as a dimension.
This basic solution finds all the conflicts in an association set and then creates a
matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a
simple 2D assignment problem. Therefore, each object will only ever be assigned to one other
at any one time. In the case of an association that only partially overlaps, the time range
of the "weaker" one (the one eliminated by assign2D) will be trimmed
until there is no conflict.
Due to the possibility of more than two conflicting associations at the same time,
this algorithm is recursive, but it is not expected many (if any) recursions will be required
for most uses.
Parameters
----------
association_set: The :class:`AssociationSet` to de-conflict
Returns
-------
: :class:`AssociationSet`
The association set without contradictory associations
"""
# Check if there are any conflicts
no_conflicts = True
for assoc1 in association_set:
for assoc2 in association_set:
if conflicts(assoc1, assoc2):
no_conflicts = False
if no_conflicts:
return association_set
objects = list(association_set.object_set)
length = len(objects)
totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total
for association in association_set.associations:
if len(association.objects) != 2:
raise ValueError("Supplied set must only contain pairs of associated objects")
obj_indices = [objects.index(list(association.objects)[0]),
objects.index(list(association.objects)[1])]
totals[obj_indices[0], obj_indices[1]] = association.time_range.duration.total_seconds()
make_symmetric(totals)
totals = numpy.rint(totals).astype(int)
numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself
solved_2d = assign2D(totals, maximize=True)[1]
winning_indices = [] # Pairs that are chosen by assign2D
for i in range(length):
if i != solved_2d[i]:
winning_indices.append([i, solved_2d[i]])
cleaned_set = AssociationSet()
if len(winning_indices) == 0:
raise ValueError("Problem unsolvable using this method")
for winner in winning_indices:
assoc = association_set.associations_including_objects({objects[winner[0]],
objects[winner[1]]})
cleaned_set.add(assoc)
association_set.remove(assoc)
# Recursive step
runners_up = set()
for assoc1 in association_set.associations:
for assoc2 in association_set.associations:
if conflicts(assoc1, assoc2):
runners_up = multidimensional_deconfliction(association_set).associations
# At this point, none of association_set should conflict with one another
for runner_up in runners_up:
for winner in cleaned_set:
if conflicts(runner_up, winner):
runner_up.time_range.minus(winner.time_range)
if runner_up.time_range is not None:
cleaned_set.add(runner_up)
else:
runners_up.remove(runner_up)
return cleaned_set
|
25,270 |
def multi_attribute(*attribute_paths):
"""Creates a projection that extracts the values of
one or more attribute paths.
Args:
attribute_paths (str): Extracts values from these paths, if given.
Returns:
Projection[any]: A projection that extracts the values of the given
attribute paths.
"""
return _MultiAttributeProjection(*attribute_paths)
|
def multi_attribute(*attribute_paths):
"""Creates a projection that extracts the values of
one or more attribute paths.
Args:
*attribute_paths (str): Paths to extract the attributes from.
Returns:
Projection[any]: A projection that extracts the values of the given
attribute paths.
"""
return _MultiAttributeProjection(*attribute_paths)
|
31,461 |
def main() -> None:
params = demisto.params()
base_url: str = urljoin(params.get('base_url', '').rstrip('/'), '/api/aed/v2')
verify_certificate: bool = not params.get('insecure', False)
proxy: bool = params.get('proxy', False)
api_token = params.get('api_token')
commands = init_commands_dict()
demisto_command = demisto.command()
try:
handle_proxy()
client = Client(
base_url=base_url,
verify=verify_certificate,
api_token=api_token,
proxy=proxy)
if not demisto_command or demisto_command not in commands:
raise NotImplementedError(f'Command {demisto_command} is not implemented.')
demisto.debug(f'Command being called is {demisto_command}')
func_to_execute = dict_safe_get(commands, [demisto_command, 'func'])
meta_data = dict_safe_get(commands, [demisto_command, 'meta_data'])
demisto_args = demisto.args()
results = func_to_execute(client, demisto_args, meta_data) if meta_data \
else func_to_execute(client, demisto_args)
return_results(results)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto_command} command. Error: {str(e)}')
|
def main() -> None:
params = demisto.params()
base_url: str = urljoin(params.get('base_url', '').rstrip('/'), '/api/aed/v2')
verify_certificate: bool = not params.get('insecure', False)
proxy: bool = params.get('proxy', False)
api_token = params.get('api_token')
commands = init_commands_dict()
demisto_command = demisto.command()
try:
handle_proxy()
client = Client(
base_url=base_url,
verify=verify_certificate,
api_token=api_token,
proxy=proxy)
if not demisto_command or demisto_command not in commands:
raise NotImplementedError(f'Command {demisto_command} is not implemented.')
demisto.debug(f'Command being called is {demisto_command}')
func_to_execute = dict_safe_get(commands, [demisto_command, 'func'])
meta_data = dict_safe_get(commands, [demisto_command, 'meta_data'])
demisto_args = demisto.args()
results = func_to_execute(client, demisto_args, meta_data) if meta_data \
else func_to_execute(client, demisto_args)
return_results(results)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto_command} command. Error: {str(e)}', error=e)
|
7,546 |
def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str or `None`, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
43,770 |
def gradient(H, x, delta=0.005291772):
r"""Compute the gradient :math:`\nabla_x \hat{H}(x)` of the electronic Hamiltonian
:math:`\hat{H}(x)` for a given set of nuclear coordinates :math:`x` using central
differences.
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian for a given set of coordinates ``x``
x (array[float]): 1D array with the coordinates in Angstroms. The size of the array
should be ``3*N`` where ``N`` is the number of atoms in the molecule.
delta (float): Step size in Angstroms used to displace the nuclear coordinates.
Its default value corresponds to 0.01 Bohr radius.
Returns:
Iterable[pennylane.Hamiltonian]: list with the gradient vector :math:`\nabla_x \hat{H}(x)`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> grad = gradient(H, x)
>>> print(len(grad), grad[5])
6 (0.7763135743293005) [I0]
+ (0.08534360840293387) [Z0]
+ (0.08534360840293387) [Z1]
+ (-0.2669341092545041) [Z2]
+ (-0.26693410925450134) [Z3]
+ (0.025233628744274508) [Z0 Z1]
+ (-0.0072162443961340415) [Y0 X1 X2 Y3]
+ (0.0072162443961340415) [Y0 Y1 X2 X3]
+ (0.0072162443961340415) [X0 X1 Y2 Y3]
+ (-0.0072162443961340415) [X0 Y1 Y2 X3]
+ (0.030654287745411964) [Z0 Z2]
+ (0.023438043349280003) [Z0 Z3]
+ (0.023438043349280003) [Z1 Z2]
+ (0.030654287745411964) [Z1 Z3]
+ (0.02494407786332001) [Z2 Z3]
"""
grad = [derivative(H, x, i, delta=delta) for i in range(x.size)]
return grad
|
def gradient(H, x, delta=0.005291772):
r"""Compute the gradient :math:`\nabla_x \hat{H}(x)` of the electronic Hamiltonian
:math:`\hat{H}(x)` for a given set of nuclear coordinates :math:`x` using central
differences.
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian for a given set of coordinates ``x``
x (array[float]): 1D array with the coordinates in Angstroms. The size of the array
should be ``3*N`` where ``N`` is the number of atoms in the molecule.
delta (float): Step size in Angstroms used to displace the nuclear coordinates.
Its default value corresponds to 0.01 Bohr radii.
Returns:
Iterable[pennylane.Hamiltonian]: list with the gradient vector :math:`\nabla_x \hat{H}(x)`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> grad = gradient(H, x)
>>> print(len(grad), grad[5])
6 (0.7763135743293005) [I0]
+ (0.08534360840293387) [Z0]
+ (0.08534360840293387) [Z1]
+ (-0.2669341092545041) [Z2]
+ (-0.26693410925450134) [Z3]
+ (0.025233628744274508) [Z0 Z1]
+ (-0.0072162443961340415) [Y0 X1 X2 Y3]
+ (0.0072162443961340415) [Y0 Y1 X2 X3]
+ (0.0072162443961340415) [X0 X1 Y2 Y3]
+ (-0.0072162443961340415) [X0 Y1 Y2 X3]
+ (0.030654287745411964) [Z0 Z2]
+ (0.023438043349280003) [Z0 Z3]
+ (0.023438043349280003) [Z1 Z2]
+ (0.030654287745411964) [Z1 Z3]
+ (0.02494407786332001) [Z2 Z3]
"""
grad = [derivative(H, x, i, delta=delta) for i in range(x.size)]
return grad
|
41,542 |
def threshold_analysis(model_path, ds_lst, model_params, testing_params, metric="dice", increment=0.1,
fname_out="thr.png", cuda_available=True):
"""Run a threshold analysis to find the optimal threshold on a sub-dataset.
Args:
model_path (str): Model path.
ds_lst (list): List of loaders.
model_params (dict): Model's parameters.
testing_params (dict): Testing parameters
metric (str): Choice between "dice" and "recall_specificity". If "recall_specificity", then a ROC analysis
is performed.
increment (float): Increment between tested thresholds.
fname_out (str): Plot output filename.
cuda_available (bool): If True, CUDA is available.
Returns:
float: optimal threshold.
"""
if metric not in ["dice", "recall_specificity"]:
print('\nChoice of metric for threshold analysis: dice, recall_specificity.')
exit()
# Load model
model = torch.load(model_path)
# Eval mode
model.eval()
# List of thresholds
thr_list = list(np.arange(0.0, 1.0, increment))[1:]
# Init metric manager for each thr
metric_fns = [imed_metrics.recall_score,
imed_metrics.dice_score,
imed_metrics.specificity_score]
metric_dict = {thr: imed_metrics.MetricManager(metric_fns) for thr in thr_list}
# Load
loader = DataLoader(ConcatDataset(ds_lst), batch_size=8,
shuffle=False, pin_memory=True, sampler=None,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
# Run inference
preds_npy, gt_npy = run_inference(loader, model, model_params,
testing_params,
ofolder=None,
cuda_available=cuda_available)
# Make sure the GT is binarized
gt_npy = [threshold_predictions(gt, thr=0.5) for gt in gt_npy]
# Move threshold
for thr in thr_list:
preds_thr = [threshold_predictions(copy.deepcopy(pred), thr=thr) for pred in preds_npy]
metric_dict[thr](preds_thr, gt_npy)
# Get results
tpr_list, fpr_list, dice_list = [], [], []
for thr in thr_list:
result_thr = metric_dict[thr].get_results()
tpr_list.append(result_thr["recall_score"])
fpr_list.append(1 - result_thr["specificity_score"])
dice_list.append(result_thr["dice_score"])
# Get optimal threshold
if metric == "dice":
diff_list = dice_list
else:
diff_list = [tpr - fpr for tpr, fpr in zip(tpr_list, fpr_list)]
optimal_idx = np.max(np.where(diff_list == np.max(diff_list)))
optimal_threshold = thr_list[optimal_idx]
print('\tOptimal threshold: {}'.format(optimal_threshold))
# Save plot
print('\tSaving plot: {}'.format(fname_out))
if metric == "dice":
# Run plot
imed_metrics.plot_dice_thr(thr_list, dice_list, optimal_idx, fname_out)
else:
# Add 0 and 1 as extrema
tpr_list = [0.0] + tpr_list + [1.0]
fpr_list = [0.0] + fpr_list + [1.0]
optimal_idx += 1
# Run plot
imed_metrics.plot_roc_curve(tpr_list, fpr_list, optimal_idx, fname_out)
return optimal_threshold
|
def threshold_analysis(model_path, ds_lst, model_params, testing_params, metric="dice", increment=0.1,
fname_out="thr.png", cuda_available=True):
"""Run a threshold analysis to find the optimal threshold on a sub-dataset.
Args:
model_path (str): Model path.
ds_lst (list): List of loaders.
model_params (dict): Model's parameters.
testing_params (dict): Testing parameters
metric (str): Choice between "dice" and "recall_specificity". If "recall_specificity", then a ROC analysis
is performed.
increment (float): Increment between tested thresholds.
fname_out (str): Plot output filename.
cuda_available (bool): If True, CUDA is available.
Returns:
float: optimal threshold.
"""
if metric not in ["dice", "recall_specificity"]:
print('\nChoice of metric for threshold analysis: dice, recall_specificity.')
exit()
# Load model
model = torch.load(model_path)
# Eval mode
model.eval()
# List of thresholds
thr_list = list(np.arange(0.0, 1.0, increment))[1:]
# Init metric manager for each thr
metric_fns = [imed_metrics.recall_score,
imed_metrics.dice_score,
imed_metrics.specificity_score]
metric_dict = {thr: imed_metrics.MetricManager(metric_fns) for thr in thr_list}
# Load
loader = DataLoader(ConcatDataset(ds_lst), batch_size=8,
shuffle=False, pin_memory=True, sampler=None,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
# Run inference
preds_npy, gt_npy = run_inference(loader, model, model_params,
testing_params,
ofolder=None,
cuda_available=cuda_available)
# Make sure the GT is binarized
gt_npy = threshold_predictions(gt_npy, thr=0.5)
# Move threshold
for thr in thr_list:
preds_thr = [threshold_predictions(copy.deepcopy(pred), thr=thr) for pred in preds_npy]
metric_dict[thr](preds_thr, gt_npy)
# Get results
tpr_list, fpr_list, dice_list = [], [], []
for thr in thr_list:
result_thr = metric_dict[thr].get_results()
tpr_list.append(result_thr["recall_score"])
fpr_list.append(1 - result_thr["specificity_score"])
dice_list.append(result_thr["dice_score"])
# Get optimal threshold
if metric == "dice":
diff_list = dice_list
else:
diff_list = [tpr - fpr for tpr, fpr in zip(tpr_list, fpr_list)]
optimal_idx = np.max(np.where(diff_list == np.max(diff_list)))
optimal_threshold = thr_list[optimal_idx]
print('\tOptimal threshold: {}'.format(optimal_threshold))
# Save plot
print('\tSaving plot: {}'.format(fname_out))
if metric == "dice":
# Run plot
imed_metrics.plot_dice_thr(thr_list, dice_list, optimal_idx, fname_out)
else:
# Add 0 and 1 as extrema
tpr_list = [0.0] + tpr_list + [1.0]
fpr_list = [0.0] + fpr_list + [1.0]
optimal_idx += 1
# Run plot
imed_metrics.plot_roc_curve(tpr_list, fpr_list, optimal_idx, fname_out)
return optimal_threshold
|
1,698 |
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
increasing=True):
"""Solve the isotonic regression model::
min sum w[i] (y[i] - y_[i]) ** 2
subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max
where:
- y[i] are inputs (real numbers)
- y_[i] are fitted
- w[i] are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean, optional, default: True
Whether to compute ``y_`` is increasing (if set to True) or decreasing
(if set to False)
Returns
-------
y_ : list of floats
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
order = np.s_[:] if increasing else np.s_[::-1]
y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32])
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype)
sample_weight = np.ascontiguousarray(sample_weight[order])
_inplace_contiguous_isotonic_regression(y, sample_weight)
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y[order]
|
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
increasing=True):
"""Solve the isotonic regression model::
min sum w[i] (y[i] - y_[i]) ** 2
subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max
where:
- y[i] are inputs (real numbers)
- y_[i] are fitted
- w[i] are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean, optional, default: True
Whether to compute ``y_`` is increasing (if set to True) or decreasing
(if set to False)
Returns
-------
y_ : list of floats
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
order = np.s_[:] if increasing else np.s_[::-1]
y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32])
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype)
sample_weight = sample_weight[order]
_inplace_contiguous_isotonic_regression(y, sample_weight)
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y[order]
|
8,903 |
def test_list_serialize_value_error():
option = types.ListAttribute('foo')
with pytest.raises(ValueError):
option.serialize('value 1')
with pytest.raises(ValueError):
option.serialize(('1', '2', '3')) # tuple are not allowed
|
def test_list_serialize_value_error():
option = types.ListAttribute('foo')
with pytest.raises(ValueError):
option.serialize('value 1')
with pytest.raises(ValueError):
option.serialize(('1', '2', '3')) # tuples are not allowed
|
43,163 |
def reorder_graph(g, node_permute_algo=None, edge_permute_algo='src',
store_ids=True, permute_config=None):
r"""Return a new graph with nodes and edges re-ordered/re-labeled
according to the specified permute algorithm.
Support homogeneous graph only for the moment.
The re-ordering has two 2 steps: first re-order nodes and then re-order edges.
For node permutation, users can re-order by the :attr:`node_permute_algo`
argument. For edge permutation, user can re-arrange edges according to their
source nodes or destination nodes by the :attr:`edge_permute_algo` argument.
Some of the permutation algorithms are only implemented in CPU, so if the
input graph is on GPU, it will be copied to CPU first. The storage order of
the node and edge features in the graph are permuted accordingly.
Parameters
----------
g : DGLGraph
The homogeneous graph.
node_permute_algo: None or str, optional
The permutation algorithm to re-order nodes. If given, the options are ``rcmk`` or
``metis`` or ``custom``.
* ``None``: No nodes are re-ordered.
* ``rcmk``: Use the `Reverse Cuthill–McKee <https://docs.scipy.org/doc/scipy/reference/
generated/scipy.sparse.csgraph.reverse_cuthill_mckee.html#
scipy-sparse-csgraph-reverse-cuthill-mckee>`__ from ``scipy`` to generate nodes
permutation.
* ``metis``: Use the :func:`~dgl.metis_partition_assignment` function
to partition the input graph, which gives a cluster assignment of each node.
DGL then sorts the assignment array so the new node order will put nodes of
the same cluster together. Please note that the generated nodes permutation
of ``metis`` is non-deterministic due to algorithm's nature.
* ``custom``: Reorder the graph according to the user-provided node permutation
array (provided in :attr:`permute_config`).
edge_permute_algo: str, optional
The permutation algorithm to reorder edges. Options are ``src`` or ``dst`` or
``custom``. ``src`` is the default value.
* ``src``: Edges are arranged according to their source nodes.
* ``dst``: Edges are arranged according to their destination nodes.
* ``custom``: Edges are arranged according to the user-provided edge permutation
array (provided in :attr:`permute_config`).
store_ids: bool, optional
If True, DGL will store the original node and edge IDs in the ndata and edata
of the resulting graph under name ``dgl.NID`` and ``dgl.EID``, respectively.
permute_config: dict, optional
Additional key-value config data for the specified permutation algorithm.
* For ``rcmk``, this argument is not required.
* For ``metis``, users should specify the number of partitions ``k`` (e.g.,
``permute_config={'k':10}`` to partition the graph to 10 clusters).
* For ``custom`` node reordering, users should provide a node permutation
array ``nodes_perm``. The array must be an integer list or a tensor with
the same device of the input graph.
* For ``custom`` edge reordering, users should provide an edge permutation
array ``edges_perm``. The array must be an integer list or a tensor with
the same device of the input graph.
Returns
-------
DGLGraph
The re-ordered graph.
Examples
--------
>>> import dgl
>>> import torch
>>> g = dgl.graph((torch.tensor([0, 1, 2, 3, 4]), torch.tensor([2, 2, 3, 2, 3])))
>>> g.ndata['h'] = torch.arange(g.num_nodes() * 2).view(g.num_nodes(), 2)
>>> g.edata['w'] = torch.arange(g.num_edges() * 1).view(g.num_edges(), 1)
>>> g.ndata
{'h': tensor([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])}
>>> g.edata
{'w': tensor([[0],
[1],
[2],
[3],
[4]])}
Reorder according to ``'rcmk'`` permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='rcmk')
>>> rg.ndata
{'h': tensor([[8, 9],
[6, 7],
[2, 3],
[4, 5],
[0, 1]]), '_ID': tensor([4, 3, 1, 2, 0])}
>>> rg.edata
{'w': tensor([[4],
[3],
[1],
[2],
[0]]), '_ID': tensor([4, 3, 1, 2, 0])}
Reorder according to ``'metis'`` permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='metis', permute_config={'k':2})
>>> rg.ndata
{'h': tensor([[4, 5],
[2, 3],
[0, 1],
[8, 9],
[6, 7]]), '_ID': tensor([2, 1, 0, 4, 3])}
>>> rg.edata
{'w': tensor([[2],
[1],
[0],
[4],
[3]]), '_ID': tensor([2, 1, 0, 4, 3])}
Reorder according to ``'custom'`` permute algorithm with user-provided nodes_perm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='custom',
... permute_config={'nodes_perm': [3, 2, 0, 4, 1]})
>>> rg.ndata
{'h': tensor([[6, 7],
[4, 5],
[0, 1],
[8, 9],
[2, 3]]), '_ID': tensor([3, 2, 0, 4, 1])}
>>> rg.edata
{'w': tensor([[3],
[2],
[0],
[4],
[1]]), '_ID': tensor([3, 2, 0, 4, 1])}
Reorder nodes according to ``'rcmk'`` and reorder edges according to ``dst``
edge permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='rcmk', edge_permute_algo='dst')
>>> print(rg.ndata)
{'h': tensor([[8, 9],
[6, 7],
[2, 3],
[4, 5],
[0, 1]]), '_ID': tensor([4, 3, 1, 2, 0])}
>>> print(rg.edata)
{'w': tensor([[4],
[2],
[3],
[1],
[0]]), '_ID': tensor([4, 2, 3, 1, 0])}
Nodes are not reordered but edges are reordered according to ``'custom'`` permute
algorithm with user-provided edges_perm.
>>> rg = dgl.reorder_graph(g, edge_permute_algo='custom',
... permute_config={'edges_perm': [1, 2, 3, 4, 0]})
>>> print(rg.ndata)
{'h': tensor([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]]), '_ID': tensor([0, 1, 2, 3, 4])}
>>> print(rg.edata)
{'w': tensor([[1],
[2],
[3],
[4],
[0]]), '_ID': tensor([1, 2, 3, 4, 0])}
"""
# sanity checks
if not g.is_homogeneous:
raise DGLError("Only homogeneous graphs are supported.")
expected_node_algo = ['rcmk', 'metis', 'custom']
if node_permute_algo is not None and node_permute_algo not in expected_node_algo:
raise DGLError("Unexpected node_permute_algo is specified: {}. Expected algos: {}".format(
node_permute_algo, expected_node_algo))
expected_edge_algo = ['src', 'dst', 'custom']
if edge_permute_algo not in expected_edge_algo:
raise DGLError("Unexpected edge_permute_algo is specified: {}. Expected algos: {}".format(
edge_permute_algo, expected_edge_algo))
g.edata['__orig__'] = F.arange(0, g.num_edges(), g.idtype, g.device)
# reorder nodes
if node_permute_algo == 'rcmk':
nodes_perm = rcmk_perm(g)
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
elif node_permute_algo == 'metis':
if permute_config is None or 'k' not in permute_config:
raise DGLError(
"Partition parts 'k' is required for metis. Please specify in permute_config.")
nodes_perm = metis_perm(g, permute_config['k'])
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
elif node_permute_algo == 'custom':
if permute_config is None or 'nodes_perm' not in permute_config:
raise DGLError(
"node_permute_algo is specified as custom, but no 'nodes_perm' is specified in \
permute_config.")
nodes_perm = permute_config['nodes_perm']
if len(nodes_perm) != g.num_nodes():
raise DGLError("Length of 'nodes_perm' ({}) does not \
match graph num_nodes ({}).".format(len(nodes_perm), g.num_nodes()))
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
else:
nodes_perm = F.arange(0, g.num_nodes(), g.idtype, g.device)
rg = g.clone()
if store_ids:
rg.ndata[NID] = F.copy_to(F.tensor(nodes_perm, g.idtype), g.device)
g.edata.pop('__orig__')
# reorder edges
if edge_permute_algo == 'src':
edges_perm = np.argsort(F.asnumpy(rg.edges()[0]))
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
elif edge_permute_algo == 'dst':
edges_perm = np.argsort(F.asnumpy(rg.edges()[1]))
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
elif edge_permute_algo == 'custom':
if permute_config is None or 'edges_perm' not in permute_config:
raise DGLError(
"edge_permute_algo is specified as custom, but no 'edges_perm' is specified in \
permute_config.")
edges_perm = permute_config['edges_perm']
# First revert the edge reorder caused by node reorder and then
# apply user-provided edge permutation
rev_id = F.argsort(rg.edata['__orig__'], 0, False)
edges_perm = F.astype(F.gather_row(rev_id, F.tensor(edges_perm)), rg.idtype)
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
if store_ids:
rg.edata[EID] = rg.edata.pop('__orig__')
return rg
|
def reorder_graph(g, node_permute_algo=None, edge_permute_algo='src',
store_ids=True, permute_config=None):
r"""Return a new graph with nodes and edges re-ordered/re-labeled
according to the specified permute algorithm.
Support homogeneous graph only for the moment.
The re-ordering has two 2 steps: first re-order nodes and then re-order edges.
For node permutation, users can re-order by the :attr:`node_permute_algo`
argument. For edge permutation, user can re-arrange edges according to their
source nodes or destination nodes by the :attr:`edge_permute_algo` argument.
Some of the permutation algorithms are only implemented in CPU, so if the
input graph is on GPU, it will be copied to CPU first. The storage order of
the node and edge features in the graph are permuted accordingly.
Parameters
----------
g : DGLGraph
The homogeneous graph.
node_permute_algo: None or str, optional
The permutation algorithm to re-order nodes. If given, the options are ``rcmk`` or
``metis`` or ``custom``.
* ``None``: Keep the current node order.
* ``rcmk``: Use the `Reverse Cuthill–McKee <https://docs.scipy.org/doc/scipy/reference/
generated/scipy.sparse.csgraph.reverse_cuthill_mckee.html#
scipy-sparse-csgraph-reverse-cuthill-mckee>`__ from ``scipy`` to generate nodes
permutation.
* ``metis``: Use the :func:`~dgl.metis_partition_assignment` function
to partition the input graph, which gives a cluster assignment of each node.
DGL then sorts the assignment array so the new node order will put nodes of
the same cluster together. Please note that the generated nodes permutation
of ``metis`` is non-deterministic due to algorithm's nature.
* ``custom``: Reorder the graph according to the user-provided node permutation
array (provided in :attr:`permute_config`).
edge_permute_algo: str, optional
The permutation algorithm to reorder edges. Options are ``src`` or ``dst`` or
``custom``. ``src`` is the default value.
* ``src``: Edges are arranged according to their source nodes.
* ``dst``: Edges are arranged according to their destination nodes.
* ``custom``: Edges are arranged according to the user-provided edge permutation
array (provided in :attr:`permute_config`).
store_ids: bool, optional
If True, DGL will store the original node and edge IDs in the ndata and edata
of the resulting graph under name ``dgl.NID`` and ``dgl.EID``, respectively.
permute_config: dict, optional
Additional key-value config data for the specified permutation algorithm.
* For ``rcmk``, this argument is not required.
* For ``metis``, users should specify the number of partitions ``k`` (e.g.,
``permute_config={'k':10}`` to partition the graph to 10 clusters).
* For ``custom`` node reordering, users should provide a node permutation
array ``nodes_perm``. The array must be an integer list or a tensor with
the same device of the input graph.
* For ``custom`` edge reordering, users should provide an edge permutation
array ``edges_perm``. The array must be an integer list or a tensor with
the same device of the input graph.
Returns
-------
DGLGraph
The re-ordered graph.
Examples
--------
>>> import dgl
>>> import torch
>>> g = dgl.graph((torch.tensor([0, 1, 2, 3, 4]), torch.tensor([2, 2, 3, 2, 3])))
>>> g.ndata['h'] = torch.arange(g.num_nodes() * 2).view(g.num_nodes(), 2)
>>> g.edata['w'] = torch.arange(g.num_edges() * 1).view(g.num_edges(), 1)
>>> g.ndata
{'h': tensor([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])}
>>> g.edata
{'w': tensor([[0],
[1],
[2],
[3],
[4]])}
Reorder according to ``'rcmk'`` permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='rcmk')
>>> rg.ndata
{'h': tensor([[8, 9],
[6, 7],
[2, 3],
[4, 5],
[0, 1]]), '_ID': tensor([4, 3, 1, 2, 0])}
>>> rg.edata
{'w': tensor([[4],
[3],
[1],
[2],
[0]]), '_ID': tensor([4, 3, 1, 2, 0])}
Reorder according to ``'metis'`` permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='metis', permute_config={'k':2})
>>> rg.ndata
{'h': tensor([[4, 5],
[2, 3],
[0, 1],
[8, 9],
[6, 7]]), '_ID': tensor([2, 1, 0, 4, 3])}
>>> rg.edata
{'w': tensor([[2],
[1],
[0],
[4],
[3]]), '_ID': tensor([2, 1, 0, 4, 3])}
Reorder according to ``'custom'`` permute algorithm with user-provided nodes_perm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='custom',
... permute_config={'nodes_perm': [3, 2, 0, 4, 1]})
>>> rg.ndata
{'h': tensor([[6, 7],
[4, 5],
[0, 1],
[8, 9],
[2, 3]]), '_ID': tensor([3, 2, 0, 4, 1])}
>>> rg.edata
{'w': tensor([[3],
[2],
[0],
[4],
[1]]), '_ID': tensor([3, 2, 0, 4, 1])}
Reorder nodes according to ``'rcmk'`` and reorder edges according to ``dst``
edge permute algorithm.
>>> rg = dgl.reorder_graph(g, node_permute_algo='rcmk', edge_permute_algo='dst')
>>> print(rg.ndata)
{'h': tensor([[8, 9],
[6, 7],
[2, 3],
[4, 5],
[0, 1]]), '_ID': tensor([4, 3, 1, 2, 0])}
>>> print(rg.edata)
{'w': tensor([[4],
[2],
[3],
[1],
[0]]), '_ID': tensor([4, 2, 3, 1, 0])}
Nodes are not reordered but edges are reordered according to ``'custom'`` permute
algorithm with user-provided edges_perm.
>>> rg = dgl.reorder_graph(g, edge_permute_algo='custom',
... permute_config={'edges_perm': [1, 2, 3, 4, 0]})
>>> print(rg.ndata)
{'h': tensor([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]]), '_ID': tensor([0, 1, 2, 3, 4])}
>>> print(rg.edata)
{'w': tensor([[1],
[2],
[3],
[4],
[0]]), '_ID': tensor([1, 2, 3, 4, 0])}
"""
# sanity checks
if not g.is_homogeneous:
raise DGLError("Only homogeneous graphs are supported.")
expected_node_algo = ['rcmk', 'metis', 'custom']
if node_permute_algo is not None and node_permute_algo not in expected_node_algo:
raise DGLError("Unexpected node_permute_algo is specified: {}. Expected algos: {}".format(
node_permute_algo, expected_node_algo))
expected_edge_algo = ['src', 'dst', 'custom']
if edge_permute_algo not in expected_edge_algo:
raise DGLError("Unexpected edge_permute_algo is specified: {}. Expected algos: {}".format(
edge_permute_algo, expected_edge_algo))
g.edata['__orig__'] = F.arange(0, g.num_edges(), g.idtype, g.device)
# reorder nodes
if node_permute_algo == 'rcmk':
nodes_perm = rcmk_perm(g)
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
elif node_permute_algo == 'metis':
if permute_config is None or 'k' not in permute_config:
raise DGLError(
"Partition parts 'k' is required for metis. Please specify in permute_config.")
nodes_perm = metis_perm(g, permute_config['k'])
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
elif node_permute_algo == 'custom':
if permute_config is None or 'nodes_perm' not in permute_config:
raise DGLError(
"node_permute_algo is specified as custom, but no 'nodes_perm' is specified in \
permute_config.")
nodes_perm = permute_config['nodes_perm']
if len(nodes_perm) != g.num_nodes():
raise DGLError("Length of 'nodes_perm' ({}) does not \
match graph num_nodes ({}).".format(len(nodes_perm), g.num_nodes()))
rg = subgraph.node_subgraph(g, nodes_perm, store_ids=False)
else:
nodes_perm = F.arange(0, g.num_nodes(), g.idtype, g.device)
rg = g.clone()
if store_ids:
rg.ndata[NID] = F.copy_to(F.tensor(nodes_perm, g.idtype), g.device)
g.edata.pop('__orig__')
# reorder edges
if edge_permute_algo == 'src':
edges_perm = np.argsort(F.asnumpy(rg.edges()[0]))
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
elif edge_permute_algo == 'dst':
edges_perm = np.argsort(F.asnumpy(rg.edges()[1]))
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
elif edge_permute_algo == 'custom':
if permute_config is None or 'edges_perm' not in permute_config:
raise DGLError(
"edge_permute_algo is specified as custom, but no 'edges_perm' is specified in \
permute_config.")
edges_perm = permute_config['edges_perm']
# First revert the edge reorder caused by node reorder and then
# apply user-provided edge permutation
rev_id = F.argsort(rg.edata['__orig__'], 0, False)
edges_perm = F.astype(F.gather_row(rev_id, F.tensor(edges_perm)), rg.idtype)
rg = subgraph.edge_subgraph(
rg, edges_perm, relabel_nodes=False, store_ids=False)
if store_ids:
rg.edata[EID] = rg.edata.pop('__orig__')
return rg
|
41,346 |
def rename_doubles_from_list(metric_names):
"""
This function takes a list in the format `['a', ['b', 'a'], 'c', 'a', 'c']`
and returns a list where each double is added a number so that there are no
more doubles in the list: `['a1', ['b', 'a2'], 'c1', 'a3', 'c2']`. It does so
using the provided counts and using the numbering Counter object.
"""
counts = Counter(flatten_metric_names(metric_names))
numbering = Counter()
def get_name(name):
if counts[name] > 1:
numbering[name] += 1
return name + str(numbering[name])
return name
return [
[get_name(name) for name in names] if not isinstance(names, str) else get_name(names) for names in metric_names
]
|
def rename_doubles_from_list(metric_names: List) -> List:
"""
This function takes a list in the format `['a', ['b', 'a'], 'c', 'a', 'c']`
and returns a list where each double is added a number so that there are no
more doubles in the list: `['a1', ['b', 'a2'], 'c1', 'a3', 'c2']`. It does so
using the provided counts and using the numbering Counter object.
"""
counts = Counter(flatten_metric_names(metric_names))
numbering = Counter()
def get_name(name):
if counts[name] > 1:
numbering[name] += 1
return name + str(numbering[name])
return name
return [
[get_name(name) for name in names] if not isinstance(names, str) else get_name(names) for names in metric_names
]
|
5,670 |
def pdist(X, metric='euclidean', *args, **kwargs):
"""
Pairwise distances between observations in n-dimensional space.
See Notes for common calling conventions.
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : str or function, optional
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
*args : tuple. Deprecated.
Additional arguments should be passed as keyword arguments
**kwargs : dict, optional
Extra arguments to `metric`: refer to each metric documentation for a
list of all possible arguments.
Some possible arguments:
p : scalar
The p-norm to apply for Minkowski, weighted and unweighted.
Default: 2.
w : ndarray
The weight vector for metrics that support weights (e.g., Minkowski).
V : ndarray
The variance vector for standardized Euclidean.
Default: var(X, axis=0, ddof=1)
VI : ndarray
The inverse of the covariance matrix for Mahalanobis.
Default: inv(cov(X.T)).T
out : ndarray.
The output array
If not None, condensed distance matrix Y is stored in this array.
Note: metric independent, it will become a regular keyword arg in a
future scipy version
Returns
-------
Y : ndarray
Returns a condensed distance matrix Y. For
each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number
of original observations. The metric ``dist(u=X[i], v=X[j])``
is computed and stored in entry
``m * i + j - (i + 2) * (i + 1) // 2``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p=2.)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
{\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Synonym for 'hamming'.
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
"""
# You can also call this as:
# Y = pdist(X, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in X using the distance metric 'abc' but
# with a more succinct, verifiable, but less efficient implementation.
X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
check_finite=False)
kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
X = np.asarray(X, order='c')
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
out = kwargs.pop("out", None)
if out is None:
dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
else:
if out.shape != (m * (m - 1) // 2,):
raise ValueError("output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.double:
raise ValueError("Output array must be double type.")
dm = out
# compute blocklist for deprecated kwargs
if(metric in _METRICS['jensenshannon'].aka
or metric == 'test_jensenshannon' or metric == jensenshannon):
kwargs_blocklist = ["p", "w", "V", "VI"]
elif(metric in _METRICS['minkowski'].aka
or metric in _METRICS['wminkowski'].aka
or metric in ['test_minkowski', 'test_wminkowski']
or metric in [minkowski, wminkowski]):
kwargs_blocklist = ["V", "VI"]
elif(metric in _METRICS['seuclidean'].aka or
metric == 'test_seuclidean' or metric == seuclidean):
kwargs_blocklist = ["p", "w", "VI"]
elif(metric in _METRICS['mahalanobis'].aka
or metric == 'test_mahalanobis' or metric == mahalanobis):
kwargs_blocklist = ["p", "w", "V"]
else:
kwargs_blocklist = ["p", "V", "VI"]
_filter_deprecated_kwargs(kwargs, kwargs_blocklist)
if callable(metric):
mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
k = 0
for i in range(0, m - 1):
for j in range(i + 1, m):
dm[k] = metric(X[i], X[j], **kwargs)
k = k + 1
elif isinstance(metric, str):
mstr = metric.lower()
mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
# get pdist wrapper
pdist_fn = getattr(_distance_wrap,
"pdist_%s_%s_wrap" % (metric_name, typ))
pdist_fn(X, dm, **kwargs)
return dm
elif mstr in ['old_cosine', 'old_cos']:
warnings.warn('"old_cosine" is deprecated and will be removed in '
'a future version. Use "cosine" instead.',
DeprecationWarning)
X = _convert_to_double(X)
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
np.sqrt(norms, out=norms)
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[range(0, m), range(0, m)] = 0.0
dm = squareform(dm)
elif mstr.startswith("test_"):
if mstr in _TEST_METRICS:
dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
else:
raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
|
def pdist(X, metric='euclidean', *args, **kwargs):
"""
Pairwise distances between observations in n-dimensional space.
See Notes for common calling conventions.
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : str or function, optional
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
*args : tuple. Deprecated.
Additional arguments should be passed as keyword arguments
**kwargs : dict, optional
Extra arguments to `metric`: refer to each metric documentation for a
list of all possible arguments.
Some possible arguments:
p : scalar
The p-norm to apply for Minkowski, weighted and unweighted.
Default: 2.
w : ndarray
The weight vector for metrics that support weights (e.g., Minkowski).
V : ndarray
The variance vector for standardized Euclidean.
Default: var(X, axis=0, ddof=1)
VI : ndarray
The inverse of the covariance matrix for Mahalanobis.
Default: inv(cov(X.T)).T
out : ndarray.
The output array
If not None, condensed distance matrix Y is stored in this array.
Note: metric independent, it will become a regular keyword arg in a
future scipy version
Returns
-------
Y : ndarray
Returns a condensed distance matrix Y. For
each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number
of original observations. The metric ``dist(u=X[i], v=X[j])``
is computed and stored in entry
``m * i + j - ((i + 2) * (i + 1)) // 2``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p=2.)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
{\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Synonym for 'hamming'.
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
"""
# You can also call this as:
# Y = pdist(X, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in X using the distance metric 'abc' but
# with a more succinct, verifiable, but less efficient implementation.
X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
check_finite=False)
kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
X = np.asarray(X, order='c')
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
out = kwargs.pop("out", None)
if out is None:
dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
else:
if out.shape != (m * (m - 1) // 2,):
raise ValueError("output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.double:
raise ValueError("Output array must be double type.")
dm = out
# compute blocklist for deprecated kwargs
if(metric in _METRICS['jensenshannon'].aka
or metric == 'test_jensenshannon' or metric == jensenshannon):
kwargs_blocklist = ["p", "w", "V", "VI"]
elif(metric in _METRICS['minkowski'].aka
or metric in _METRICS['wminkowski'].aka
or metric in ['test_minkowski', 'test_wminkowski']
or metric in [minkowski, wminkowski]):
kwargs_blocklist = ["V", "VI"]
elif(metric in _METRICS['seuclidean'].aka or
metric == 'test_seuclidean' or metric == seuclidean):
kwargs_blocklist = ["p", "w", "VI"]
elif(metric in _METRICS['mahalanobis'].aka
or metric == 'test_mahalanobis' or metric == mahalanobis):
kwargs_blocklist = ["p", "w", "V"]
else:
kwargs_blocklist = ["p", "V", "VI"]
_filter_deprecated_kwargs(kwargs, kwargs_blocklist)
if callable(metric):
mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
k = 0
for i in range(0, m - 1):
for j in range(i + 1, m):
dm[k] = metric(X[i], X[j], **kwargs)
k = k + 1
elif isinstance(metric, str):
mstr = metric.lower()
mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
# get pdist wrapper
pdist_fn = getattr(_distance_wrap,
"pdist_%s_%s_wrap" % (metric_name, typ))
pdist_fn(X, dm, **kwargs)
return dm
elif mstr in ['old_cosine', 'old_cos']:
warnings.warn('"old_cosine" is deprecated and will be removed in '
'a future version. Use "cosine" instead.',
DeprecationWarning)
X = _convert_to_double(X)
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
np.sqrt(norms, out=norms)
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[range(0, m), range(0, m)] = 0.0
dm = squareform(dm)
elif mstr.startswith("test_"):
if mstr in _TEST_METRICS:
dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
else:
raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
|
55,074 |
def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then sample from the device specific raw samples.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
If no observable was provided then the raw samples obtained from device are returned
(eg. for a qubit device, samples from the computational device are returned). In this
case, `wires` can be specified so that sample results only include measurement results
of the qubits of interest.
**Example 1:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
**Example 2:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
|
def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then basis state samples are returned directly
from the device.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
If no observable was provided then the raw samples obtained from device are returned
(eg. for a qubit device, samples from the computational device are returned). In this
case, `wires` can be specified so that sample results only include measurement results
of the qubits of interest.
**Example 1:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
**Example 2:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
|
26,400 |
def generate_config(context):
""" Entry point for the deployment resources. """
project_id = context.properties.get('projectID', context.env['project'])
resources = []
for ii, role in enumerate(context.properties['roles']):
for i, member in enumerate(role['members']):
policy_get_name = 'get-iam-policy-{}-{}-{}'.format(context.env['name'], ii, i)
resources.append(
{
'name': policy_get_name,
'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',
'properties':
{
'resource': project_id,
'role': role['role'],
'member': member
}
}
)
return {"resources": resources}
|
def generate_config(context):
""" Entry point for the deployment resources. """
project_id = context.properties.get('projectId', context.env['project'])
resources = []
for ii, role in enumerate(context.properties['roles']):
for i, member in enumerate(role['members']):
policy_get_name = 'get-iam-policy-{}-{}-{}'.format(context.env['name'], ii, i)
resources.append(
{
'name': policy_get_name,
'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',
'properties':
{
'resource': project_id,
'role': role['role'],
'member': member
}
}
)
return {"resources": resources}
|
17,036 |
def _get_appliance_by_device_id(
hass: HomeAssistant, device_id: str
) -> api.HomeConnectDevice | None:
"""Return a Home Connect appliance instance given an device_id."""
for hc_api in hass.data[DOMAIN].values():
for dev_dict in hc_api.devices:
device = dev_dict[CONF_DEVICE]
if device.device_id == device_id:
return device.appliance
raise ValueError("Appliance for device id %s not found" % device_id)
|
def _get_appliance_by_device_id(
hass: HomeAssistant, device_id: str
) -> api.HomeConnectDevice | None:
"""Return a Home Connect appliance instance given an device_id."""
for hc_api in hass.data[DOMAIN].values():
for dev_dict in hc_api.devices:
device = dev_dict[CONF_DEVICE]
if device.device_id == device_id:
return device.appliance
raise ValueError(f"Appliance for device id {device_id} not found")
|
12,130 |
def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime('{}-{}-{} {}-{}-{}'.format(yr, mo, dy, hr, mn, sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg)
event = Event(resource_id=id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
|
def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime('{}-{}-{} {}-{}-{}'.format(yr, mo, dy, hr, mn, sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg, resource_id="smi:local/magnitude/" + id_)
event = Event(resource_id=id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
|
52,923 |
def _matplotlib_fig_titles(fig):
titles = []
# get supertitle if exists
if fig._suptitle:
titles.append(fig._suptitle.get_text())
# get titles from all axes, for all locs
title_locs = ['left', 'center', 'right']
for ax in fig.axes:
for loc in title_locs:
text = ax.get_title(loc=loc)
if text:
titles.append(text)
fig_titles = ', '.join(titles)
return fig_titles
|
def _matplotlib_fig_titles(fig):
titles = []
# get supertitle if exists
suptitle = getattr(fig, "_suptitle", None)
if suptitle is not None:
titles.append(suptitle.get_text())
# get titles from all axes, for all locs
title_locs = ['left', 'center', 'right']
for ax in fig.axes:
for loc in title_locs:
text = ax.get_title(loc=loc)
if text:
titles.append(text)
fig_titles = ', '.join(titles)
return fig_titles
|
35,645 |
def resnet101(weights: Optional[ResNet101Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNet101Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNet101Weights.verify(weights)
return _resnet(BasicBlock, [3, 4, 23, 3], weights, progress, **kwargs)
|
def resnet101(weights: Optional[ResNet101Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNet101Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNet101Weights.verify(weights)
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
|
5,645 |
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
check_finite=True):
"""
Compute QR decomposition of a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional(default - False)
Whether data in a is overwritten (may improve performance if overwrite_a
is set True by reusing the existing input data structure rather than
creating a new one.)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic', 'raw'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes). The final option 'raw'
(added in SciPy 0.11) makes the function return two matrices
(Q, TAU) in the internal format used by LAPACK.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition. If pivoting, compute the decomposition
``A P = Q R`` as above, but where P is chosen such that the diagonal
of R is non-increasing.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
Q : float or complex ndarray
Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
if ``mode='r'``.
R : float or complex ndarray
Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
P : int ndarray
Of shape (N,) for ``pivoting=True``. Not returned if
``pivoting=False``.
Raises
------
LinAlgError
Raised if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, zungqr, dgeqp3, and zgeqp3.
If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
of (M,M) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> q, r = linalg.qr(a)
>>> np.allclose(a, np.dot(q, r))
True
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> r2 = linalg.qr(a, mode='r')
>>> np.allclose(r, r2)
True
>>> q3, r3 = linalg.qr(a, mode='economic')
>>> q3.shape, r3.shape
((9, 6), (6, 6))
>>> q4, r4, p4 = linalg.qr(a, pivoting=True)
>>> d = np.abs(np.diag(r4))
>>> np.all(d[1:] <= d[:-1])
True
>>> np.allclose(a[:, p4], np.dot(q4, r4))
True
>>> q4.shape, r4.shape, p4.shape
((9, 9), (9, 6), (6,))
>>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
>>> q5.shape, r5.shape, p5.shape
((9, 6), (6, 6), (6,))
"""
# 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
# 'qr' are used below.
# 'raw' is used internally by qr_multiply
if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
raise ValueError("Mode argument should be one of ['full', 'r',"
"'economic', 'raw']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError("expected a 2-D array")
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
if pivoting:
geqp3, = get_lapack_funcs(('geqp3',), (a1,))
qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1
else:
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
overwrite_a=overwrite_a)
if mode not in ['economic', 'raw'] or M < N:
R = numpy.triu(qr)
else:
R = numpy.triu(qr[:N, :])
if pivoting:
Rj = R, jpvt
else:
Rj = R,
if mode == 'r':
return Rj
elif mode == 'raw':
return ((qr, tau),) + Rj
gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
if M < N:
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
lwork=lwork, overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
overwrite_a=1)
else:
t = qr.dtype.char
qqr = numpy.empty((M, M), dtype=t)
qqr[:, :N] = qr
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
overwrite_a=1)
return (Q,) + Rj
|
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
check_finite=True):
"""
Compute QR decomposition of a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional(default - False)
Whether data in a is overwritten (may improve performance if overwrite_a
is set to True by reusing the existing input data structure rather than
creating a new one.)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic', 'raw'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes). The final option 'raw'
(added in SciPy 0.11) makes the function return two matrices
(Q, TAU) in the internal format used by LAPACK.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition. If pivoting, compute the decomposition
``A P = Q R`` as above, but where P is chosen such that the diagonal
of R is non-increasing.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
Q : float or complex ndarray
Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
if ``mode='r'``.
R : float or complex ndarray
Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
P : int ndarray
Of shape (N,) for ``pivoting=True``. Not returned if
``pivoting=False``.
Raises
------
LinAlgError
Raised if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, zungqr, dgeqp3, and zgeqp3.
If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
of (M,M) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> q, r = linalg.qr(a)
>>> np.allclose(a, np.dot(q, r))
True
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> r2 = linalg.qr(a, mode='r')
>>> np.allclose(r, r2)
True
>>> q3, r3 = linalg.qr(a, mode='economic')
>>> q3.shape, r3.shape
((9, 6), (6, 6))
>>> q4, r4, p4 = linalg.qr(a, pivoting=True)
>>> d = np.abs(np.diag(r4))
>>> np.all(d[1:] <= d[:-1])
True
>>> np.allclose(a[:, p4], np.dot(q4, r4))
True
>>> q4.shape, r4.shape, p4.shape
((9, 9), (9, 6), (6,))
>>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
>>> q5.shape, r5.shape, p5.shape
((9, 6), (6, 6), (6,))
"""
# 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
# 'qr' are used below.
# 'raw' is used internally by qr_multiply
if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
raise ValueError("Mode argument should be one of ['full', 'r',"
"'economic', 'raw']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError("expected a 2-D array")
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
if pivoting:
geqp3, = get_lapack_funcs(('geqp3',), (a1,))
qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1
else:
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
overwrite_a=overwrite_a)
if mode not in ['economic', 'raw'] or M < N:
R = numpy.triu(qr)
else:
R = numpy.triu(qr[:N, :])
if pivoting:
Rj = R, jpvt
else:
Rj = R,
if mode == 'r':
return Rj
elif mode == 'raw':
return ((qr, tau),) + Rj
gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
if M < N:
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
lwork=lwork, overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
overwrite_a=1)
else:
t = qr.dtype.char
qqr = numpy.empty((M, M), dtype=t)
qqr[:, :N] = qr
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
overwrite_a=1)
return (Q,) + Rj
|
31,255 |
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
def main():
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
18,204 |
def commands(parser, args):
if args.update_completion:
if args.format != 'names' or any([
args.aliases, args.update, args.header
]):
tty.die("--update-completion can only be specified alone.")
# this runs the command multiple times with different arguments
return update_completion(parser, args)
else:
# run commands nomally
return _commands(parser, args)
|
def commands(parser, args):
if args.update_completion:
if args.format != 'names' or any([
args.aliases, args.update, args.header
]):
tty.die("--update-completion can only be specified alone.")
# this runs the command multiple times with different arguments
return update_completion(parser, args)
else:
# run commands normally
return _commands(parser, args)
|
31,180 |
def get_process_context(alert, process_type):
process_context = {
'Name': alert.get(f'{process_type}_process_image_name'),
'MD5': alert.get(f'{process_type}_process_image_md5'),
'SHA256': alert.get(f'{process_type}_process_image_sha256'),
'PID': alert.get(f'{process_type}_process_os_pid'),
'CommandLine': alert.get(f'{process_type}_process_command_line'),
'Path': alert.get(f'{process_type}_process_image_path'),
'Start Time': alert.get(f'{process_type}_process_execution_time'),
'Hostname': alert.get('host_name')
}
remove_nulls_from_dictionary(process_context)
# If the process contains only 'HostName' , don't create an indicator
if len(process_context.keys()) == 1 and 'Hostname' in process_context.keys():
return {}
return process_context
|
def get_process_context(alert, process_type):
process_context = {
'Name': alert.get(f'{process_type}_process_image_name'),
'MD5': alert.get(f'{process_type}_process_image_md5'),
'SHA256': alert.get(f'{process_type}_process_image_sha256'),
'PID': alert.get(f'{process_type}_process_os_pid'),
'CommandLine': alert.get(f'{process_type}_process_command_line'),
'Path': alert.get(f'{process_type}_process_image_path'),
'Start Time': alert.get(f'{process_type}_process_execution_time'),
'Hostname': alert.get('host_name'),
}
remove_nulls_from_dictionary(process_context)
# If the process contains only 'HostName' , don't create an indicator
if len(process_context.keys()) == 1 and 'Hostname' in process_context.keys():
return {}
return process_context
|
11,941 |
def on_image_revision_delete(instance, *args, **kwargs):
if not instance.image:
return
# from attachments.models.py (line 166) :
# Remove file
path = instance.image.path.split("/")[:-1]
instance.image.delete(save=False)
'''
# if backendstorage capabillity checking is required, this should be used.
# (in attachments also)
path = None
try:
path = instance.image.path.split("/")[:-1]
except NotImplementedError:
# This backend storage doesn't implement 'path' so there is no path to delete
pass
except ValueError:
# in case of Value error
# https://github.com/django-wiki/django-wiki/issues/936
pass
finally:
# Remove image file
instance.image.delete(save=False)
if path is None:
# This backend storage doesn't implement 'path' so there is no path to delete
# or some other error
return
'''
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
dir_list = os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path))
except OSError:
# Path does not exist, so let's not try to remove it...
dir_list = None
if not (dir_list is None) and len(dir_list) == 0:
os.rmdir(delete_path)
|
def on_image_revision_delete(instance, *args, **kwargs):
if not instance.image:
return
# from attachments.models.py (line 166) :
# Remove file
path = os.path.dirname(instance.image.path)
instance.image.delete(save=False)
'''
# if backendstorage capabillity checking is required, this should be used.
# (in attachments also)
path = None
try:
path = instance.image.path.split("/")[:-1]
except NotImplementedError:
# This backend storage doesn't implement 'path' so there is no path to delete
pass
except ValueError:
# in case of Value error
# https://github.com/django-wiki/django-wiki/issues/936
pass
finally:
# Remove image file
instance.image.delete(save=False)
if path is None:
# This backend storage doesn't implement 'path' so there is no path to delete
# or some other error
return
'''
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
dir_list = os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path))
except OSError:
# Path does not exist, so let's not try to remove it...
dir_list = None
if not (dir_list is None) and len(dir_list) == 0:
os.rmdir(delete_path)
|
32,337 |
def reset_base_pack_version(client: demisto_client):
"""
Resets base pack version to prod version.
Args:
client (demisto_client): The client to connect to.
"""
host = client.api_client.configuration.host.replace('api-', '')
try:
# make the search request
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/Base',
method='GET',
accept='application/json',
_request_timeout=None)
if 200 <= status_code < 300:
result_object = ast.literal_eval(response_data)
if result_object and result_object.get('currentVersion'):
logging.debug('Found Base pack in bucket!')
pack_data = {
'id': result_object.get('id'),
'version': result_object.get('currentVersion')
}
# install latest version of Base pack
logging.info(f'updating base pack to version {result_object.get("currentVersion")}')
return install_packs(client, host, [pack_data], False)
else:
raise Exception('Did not find Base pack')
else:
result_object = ast.literal_eval(response_data)
msg = result_object.get('message', '')
err_msg = f'Search request for base pack, failed with status code ' \
f'{status_code}\n{msg}'
raise Exception(err_msg)
except Exception:
logging.exception('Search request Base pack has failed.')
return False
|
def reset_base_pack_version(client: demisto_client):
"""
Resets base pack version to prod version.
Args:
client (demisto_client): The client to connect to.
"""
host = client.api_client.configuration.host.replace('https://api-', 'https://')
try:
# make the search request
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/Base',
method='GET',
accept='application/json',
_request_timeout=None)
if 200 <= status_code < 300:
result_object = ast.literal_eval(response_data)
if result_object and result_object.get('currentVersion'):
logging.debug('Found Base pack in bucket!')
pack_data = {
'id': result_object.get('id'),
'version': result_object.get('currentVersion')
}
# install latest version of Base pack
logging.info(f'updating base pack to version {result_object.get("currentVersion")}')
return install_packs(client, host, [pack_data], False)
else:
raise Exception('Did not find Base pack')
else:
result_object = ast.literal_eval(response_data)
msg = result_object.get('message', '')
err_msg = f'Search request for base pack, failed with status code ' \
f'{status_code}\n{msg}'
raise Exception(err_msg)
except Exception:
logging.exception('Search request Base pack has failed.')
return False
|
48,313 |
def remove_initiator(module, array, ini):
changed = False
if module.check_mode:
module.exit_json(changed=changed)
try:
ini_id = ini['id']
ok = array.remove_initiator(
ini_id)
if ok:
module.log(msg='Initiator {0} removed.'.format(ini_id))
changed = True
else:
module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id))
except Exception:
pass
module.exit_json(changed=changed)
|
def remove_initiator(module, array, ini):
changed = False
if module.check_mode:
module.exit_json(changed=changed)
try:
ini_id = ini['id']
ok = array.remove_initiator(
ini_id)
if ok:
msg = 'Initiator {0} removed.'.format(ini_id)
module.log(msg=msg)
changed = True
else:
module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id))
except Exception:
pass
module.exit_json(changed=changed)
|
58,086 |
def main() -> None:
"""Main function, parses params and runs command functions."""
ip = demisto.params().get("ip")
token = demisto.params().get("token", None)
user = demisto.params().get("credentials", {}).get("identifier", None)
password = demisto.params().get("credentials", {}).get("password", None)
check_cert = demisto.params().get("check_cert", False)
demisto.debug(f"Command being called is {demisto.command()}")
try:
client = GwClient(ip=ip, check_cert=check_cert)
client.auth(
user=user if user != "" else None,
password=password if password != "" else None,
token=token
)
if demisto.command() == "test-module":
return_results(
test_module(client=client)
)
elif demisto.command() == "gw-list-alerts":
return_results(
gw_list_alerts(client=client, args=demisto.args())
)
elif demisto.command() == "gw-get-alert":
return_results(
gw_get_alert(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-malcore-list-entry":
return_results(
gw_add_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-malcore-list-entry":
return_results(
gw_del_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-dga-list-entry":
return_results(
gw_add_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-dga-list-entry":
return_results(
gw_del_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-es-query":
return_results(
gw_es_query(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-asset-name":
return_results(
gw_add_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-ip":
return_results(
gw_add_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-name":
return_results(
gw_add_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-mac-address":
return_results(
gw_add_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-asset-name":
return_results(
gw_del_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-ip":
return_results(
gw_del_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-name":
return_results(
gw_del_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-mac-address":
return_results(
gw_del_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-malware":
return_results(
gw_send_malware(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-powershell":
return_results(
gw_send_powershell(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-shellcode":
return_results(
gw_send_shellcode(client=client, args=demisto.args())
)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(
f"Failed to execute {demisto.command()} command.\nError: {str(e)}"
)
|
def main() -> None:
"""Main function, parses params and runs command functions."""
params = demisto.params()
ip = params.get("ip")
token = params.get("token", None)
user = params.get("credentials", {}).get("identifier", None)
password = params.get("credentials", {}).get("password", None)
check_cert = params.get("check_cert", False)
demisto.debug(f"Command being called is {demisto.command()}")
try:
client = GwClient(ip=ip, check_cert=check_cert)
client.auth(
user=user if user != "" else None,
password=password if password != "" else None,
token=token
)
if demisto.command() == "test-module":
return_results(
test_module(client=client)
)
elif demisto.command() == "gw-list-alerts":
return_results(
gw_list_alerts(client=client, args=demisto.args())
)
elif demisto.command() == "gw-get-alert":
return_results(
gw_get_alert(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-malcore-list-entry":
return_results(
gw_add_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-malcore-list-entry":
return_results(
gw_del_malcore_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-dga-list-entry":
return_results(
gw_add_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-dga-list-entry":
return_results(
gw_del_dga_list_entry(client=client, args=demisto.args())
)
elif demisto.command() == "gw-es-query":
return_results(
gw_es_query(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-asset-name":
return_results(
gw_add_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-ip":
return_results(
gw_add_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-kuser-name":
return_results(
gw_add_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-add-ignore-mac-address":
return_results(
gw_add_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-asset-name":
return_results(
gw_del_ignore_asset_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-ip":
return_results(
gw_del_ignore_kuser_ip(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-kuser-name":
return_results(
gw_del_ignore_kuser_name(client=client, args=demisto.args())
)
elif demisto.command() == "gw-del-ignore-mac-address":
return_results(
gw_del_ignore_mac_address(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-malware":
return_results(
gw_send_malware(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-powershell":
return_results(
gw_send_powershell(client=client, args=demisto.args())
)
elif demisto.command() == "gw-send-shellcode":
return_results(
gw_send_shellcode(client=client, args=demisto.args())
)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(
f"Failed to execute {demisto.command()} command.\nError: {str(e)}"
)
|