id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars) |
---|---|---|
47,893 |
def find_sentence_range(context, s, e):
    # find start of sentence
    for c_s in range(s, max(-1, s - 200), -1):
        if context[c_s] in "\n\.":
            c_s += 1
            break
    # find end of sentence
    for c_e in range(max(0, e - 1), min(len(context), s + 200), +1):
        if context[c_e] in "\n\.":
            break
    return c_s, c_e
|
def find_sentence_range(context, s, e):
    # find start of sentence
    for c_s in range(s, max(-1, s - 200), -1):
        if context[c_s] in "\n\.":
            c_s += 1
            break
    # find end of sentence
    for c_e in range(max(0, e - 1), min(len(context), e + 200), +1):
        if context[c_e] in "\n\.":
            break
    return c_s, c_e
|
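A quick sanity check of the corrected forward bound (a minimal sketch; the function body is restated from the modified cell above with indentation added so the snippet runs standalone, plus illustrative input of my own):

def find_sentence_range(context, s, e):
    # scan backwards from s for a sentence-boundary character
    for c_s in range(s, max(-1, s - 200), -1):
        if context[c_s] in "\n\.":
            c_s += 1
            break
    # scan forward from e (the corrected bound) for a sentence-boundary character
    for c_e in range(max(0, e - 1), min(len(context), e + 200), +1):
        if context[c_e] in "\n\.":
            break
    return c_s, c_e

context = "First sentence. Second sentence spans here. Third one.\n"
s = context.index("Second")
e = s + len("Second sentence spans here")
c_s, c_e = find_sentence_range(context, s, e)
print(repr(context[c_s:c_e]))  # -> ' Second sentence spans here' (the containing sentence, without the trailing period)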
49,628 |
def test_repr_html_hlg_layers():
    pytest.importorskip("jinja2")
    hg = HighLevelGraph(
        {"a": {"a": 1, ("a", 0): 2, "b": 3}, "b": {"c": 4}},
        {"a": set(), "b": set()},
    )
    assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None
    for layer in hg.layers.values():
        assert xml.etree.ElementTree.fromstring(layer._repr_html_(hg.layers)) is not None
|
def test_repr_html_hlg_layers():
    pytest.importorskip("jinja2")
    hg = HighLevelGraph(
        {"a": {"a": 1, ("a", 0): 2, "b": 3}, "b": {"c": 4}},
        {"a": set(), "b": set()},
    )
    assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None
    for layer in hg.layers.values():
        assert (
            xml.etree.ElementTree.fromstring(layer._repr_html_(hg.layers)) is not None
        )
|
5,733 |
def _get_docs(func):
    """
    Decorator to take the docstring from original
    function and assign to the multimethod.
    """
    func.__doc__ = getattr(_api, func.__name__).__doc__
    @functools.wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner
|
def _get_docs(func):
    """
    Decorator to take the docstring from original
    function and assign to the multimethod.
    """
    func.__doc__ = getattr(_api, func.__name__).__doc__
    return func
|
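For context, a minimal self-contained sketch of the docstring-copying pattern above; `_api` here is a stand-in namespace with a hypothetical `mean` entry, not the real module:

import types

_api = types.SimpleNamespace()            # stand-in for the real _api module (hypothetical)
def _api_mean(x):
    """Compute the arithmetic mean of x."""
_api.mean = _api_mean

def _get_docs(func):
    # copy the docstring from the _api function of the same name; the modified
    # variant simply returns func instead of building a wrapper
    func.__doc__ = getattr(_api, func.__name__).__doc__
    return func

@_get_docs
def mean(x):
    return sum(x) / len(x)

print(mean.__doc__)     # -> Compute the arithmetic mean of x.
print(mean([1, 2, 3]))  # -> 2.0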
22,817 |
def _to_0_or_1_if_bool(v: Any) -> Union[Any, str]:
    if isinstance(v, bool):
        return 1 if v else 0
    return v
|
def _to_0_or_1_if_bool(v: Any) -> Union[Any, int]:
    if isinstance(v, bool):
        return 1 if v else 0
    return v
|
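The annotation change reflects that the helper returns an int for booleans; a tiny runnable restatement (condensed to one line here) to illustrate:

from typing import Any, Union

def _to_0_or_1_if_bool(v: Any) -> Union[Any, int]:
    # bool is a subclass of int in Python; True/False map to 1/0
    return (1 if v else 0) if isinstance(v, bool) else v

print(_to_0_or_1_if_bool(True), _to_0_or_1_if_bool(False), _to_0_or_1_if_bool("keep"))  # -> 1 0 keep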
31,798 |
def test_module(client):
    """
    Performs basic get request to get item samples
    """
    response = client.authenticate_user()
    if response.get('authorized'):
        demisto.results("ok")
    else:
        demisto.results("Error in API call in FireMonSecurityManager Integrations")
|
def test_module(client):
    """
    Performs basic get request to get item samples
    """
    response = client.authenticate_user()
    if response.get('authorized'):
        demisto.results("ok")
    else:
        return "Error in API call in FireMonSecurityManager Integrations"
|
39,479 |
def df_getitem_str_literal_idx_main_codelines(self, idx):
    """Generate main code lines for df.getitem"""
    try:
        col_idx = self.columns.index(idx)
    except ValueError:
        func_lines = [' raise KeyError']
    else:
        col_data = f'data_{col_idx}'
        func_lines = [
            f' {col_data} = get_dataframe_data(self, {col_idx})',
            f' return pandas.Series({col_data}, index=self._index, name=idx)'
        ]
    return func_lines
|
def df_getitem_str_literal_idx_main_codelines(self, idx):
    """Generate main code lines for df.getitem"""
    try:
        col_idx = self.columns.index(idx)
    except ValueError:
        func_lines = [' raise KeyError']
    else:
        col_data = f'data_{col_idx}'
        func_lines = [
            f' {col_data} = get_dataframe_data(self, {col_idx})',
            f' return pandas.Series({col_data}, index=self.index, name=idx)'
        ]
    return func_lines
|
33,124 |
def mean0(E, axis=0, rescale=True):
    r"""`center`, but only return the anomalies (not the mean).
    Parameters
    ----------
    E: ndarray
        Ensemble which going to be inflated
    axis: int, optional
        The axis to be centered. Default: 0
    rescale: bool, optional
        If True, inflate to compensate for reduction in the expected variance.
        The inflation factor is \(\sqrt{\frac{N}{N - 1}}\). Act as a way for
        unbiased variance estimation?
        where N is the ensemble size. Default: True
    Returns
    -------
    ndarray
        Ensemble anomalies
    """
    return center(E, axis=axis, rescale=rescale)[0]
|
def mean0(E, axis=0, rescale=True):
    """Like `center`, but only return the anomalies (not the mean)."""
    return center(E, axis=axis, rescale=rescale)[0]
|
57,803 |
def main():
"""
Intercept and execute commands.
"""
# IdentityNow API Base URL (https://org.api.identitynow.com)
base_url = demisto.params().get('identitynow_url')
# OAuth 2.0 Credentials
client_id = demisto.params().get('client_id')
client_secret = demisto.params().get('client_secret')
grant_type = 'client_credentials'
# Convert the argument to an int or set to MAX_INCIDENTS_TO_FETCH
max_results = int(demisto.params().get('max_fetch'))
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# first_fetch_str = demisto.params().get('first_fetch', '3 days')
# Other configs
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
request_timeout = 10
headers = get_headers(base_url, client_id, client_secret, grant_type)
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
max_results=max_results,
request_timeout=request_timeout)
demisto.debug(f'Command being called is {demisto.command()}')
try:
results = None
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
results = test_connection(base_url, client_id, client_secret, grant_type)
elif demisto.command() == 'identitynow-search-identities':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'identities', query, offset, limit)
results = build_results('IdentityNow.Identity', 'id', response)
elif demisto.command() == 'identitynow-get-accounts':
id = demisto.args().get('id', None)
name = demisto.args().get('name', None)
native_identity = demisto.args().get('native_identity', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_accounts(client, id, name, native_identity, offset, limit)
results = build_results('IdentityNow.Account', 'id', response)
elif demisto.command() == 'identitynow-get-accountactivities':
id = demisto.args().get('id', None)
requested_for = demisto.args().get('requested_for', None)
requested_by = demisto.args().get('requested_by', None)
regarding_identity = demisto.args().get('regarding_identity', None)
type = demisto.args().get('type', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_account_activities(client, id, requested_for, requested_by, regarding_identity, type, offset,
limit)
results = build_results('IdentityNow.AccountActivity', 'id', response)
elif demisto.command() == 'identitynow-search-accessprofiles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'accessprofiles', query, offset, limit)
results = build_results('IdentityNow.AccessProfile', 'id', response)
elif demisto.command() == 'identitynow-search-roles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'roles', query, offset, limit)
results = build_results('IdentityNow.Role', 'id', response)
elif demisto.command() == 'identitynow-search-entitlements':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'entitlements', query, offset, limit)
results = build_results('IdentityNow.Entitlement', 'id', response)
elif demisto.command() == 'identitynow-search-events':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'events', query, offset, limit)
results = build_results('IdentityNow.Event', 'id', response)
elif demisto.command() == 'identitynow-request-grant':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "GRANT_ACCESS", requested_for, requested_item, requested_item_type,
comment)
elif demisto.command() == 'identitynow-request-revoke':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "REVOKE_ACCESS", requested_for, requested_item, requested_item_type,
comment)
return_results(results)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main():
"""
Intercept and execute commands.
"""
# IdentityNow API Base URL (https://org.api.identitynow.com)
base_url = demisto.params().get('identitynow_url')
# OAuth 2.0 Credentials
client_id = demisto.params().get('client_id')
client_secret = demisto.params().get('client_secret')
grant_type = 'client_credentials'
# Convert the argument to an int or set to MAX_INCIDENTS_TO_FETCH
max_results = int(demisto.params().get('max_fetch'))
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# first_fetch_str = demisto.params().get('first_fetch', '3 days')
# Other configs
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
request_timeout = 10
headers = get_headers(base_url, client_id, client_secret, grant_type)
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
max_results=max_results,
request_timeout=request_timeout)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
results = None
if command == 'test-module':
# This is the call made when pressing the integration Test button.
results = test_connection(base_url, client_id, client_secret, grant_type)
elif demisto.command() == 'identitynow-search-identities':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'identities', query, offset, limit)
results = build_results('IdentityNow.Identity', 'id', response)
elif demisto.command() == 'identitynow-get-accounts':
id = demisto.args().get('id', None)
name = demisto.args().get('name', None)
native_identity = demisto.args().get('native_identity', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_accounts(client, id, name, native_identity, offset, limit)
results = build_results('IdentityNow.Account', 'id', response)
elif demisto.command() == 'identitynow-get-accountactivities':
id = demisto.args().get('id', None)
requested_for = demisto.args().get('requested_for', None)
requested_by = demisto.args().get('requested_by', None)
regarding_identity = demisto.args().get('regarding_identity', None)
type = demisto.args().get('type', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_account_activities(client, id, requested_for, requested_by, regarding_identity, type, offset,
limit)
results = build_results('IdentityNow.AccountActivity', 'id', response)
elif demisto.command() == 'identitynow-search-accessprofiles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'accessprofiles', query, offset, limit)
results = build_results('IdentityNow.AccessProfile', 'id', response)
elif demisto.command() == 'identitynow-search-roles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'roles', query, offset, limit)
results = build_results('IdentityNow.Role', 'id', response)
elif demisto.command() == 'identitynow-search-entitlements':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'entitlements', query, offset, limit)
results = build_results('IdentityNow.Entitlement', 'id', response)
elif demisto.command() == 'identitynow-search-events':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'events', query, offset, limit)
results = build_results('IdentityNow.Event', 'id', response)
elif demisto.command() == 'identitynow-request-grant':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "GRANT_ACCESS", requested_for, requested_item, requested_item_type,
comment)
elif demisto.command() == 'identitynow-request-revoke':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "REVOKE_ACCESS", requested_for, requested_item, requested_item_type,
comment)
return_results(results)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
32,634 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
secret_key = demisto.params().get('credentials').get('password')
client_key = demisto.params().get('credentials').get('identifier')
organisation_id = demisto.params().get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = demisto.params().get('proxy', False)
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
secret_key = demisto.params().get('credentials').get('password')
client_key = demisto.params().get('credentials').get('identifier')
organisation_id = demisto.params().get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = demisto.params().get('proxy', False)
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {command} command. Error: {str(e)}')
|
33,120 |
def direct_obs_matrix(Nx, obs_inds):
    """Generate matrix that "picks" state elements `obs_inds` out of `range(Nx)`.
    Parameters
    ----------
    Nx: int
        Number of total length of state vector
    obs_inds: ndarray
        The observed indices.
    Returns
    -------
    H: ndarray
        The observation matrix for direct partial observations.
    """
    Ny = len(obs_inds)
    H = np.zeros((Ny, Nx))
    H[range(Ny), obs_inds] = 1
    # One-liner:
    # H = np.array([[i==j for i in range(M)] for j in jj],float)
    return H
|
def direct_obs_matrix(Nx, obs_inds):
    """Generate matrix that "picks" state elements `obs_inds` out of `range(Nx)`.
    Parameters
    ----------
    Nx: int
        Number of total length of state vector
    obs_inds: ndarray
        Indices of elements of the state vector that are (directly) observed.
    Returns
    -------
    H: ndarray
        The observation matrix for direct partial observations.
    """
    Ny = len(obs_inds)
    H = np.zeros((Ny, Nx))
    H[range(Ny), obs_inds] = 1
    # One-liner:
    # H = np.array([[i==j for i in range(M)] for j in jj],float)
    return H
|
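An illustrative run of the picking behaviour described in the docstring (sizes and indices below are made up for demonstration, not taken from the source):

import numpy as np

Nx, obs_inds = 5, np.array([0, 3])   # hypothetical state size and observed indices
Ny = len(obs_inds)
H = np.zeros((Ny, Nx))
H[range(Ny), obs_inds] = 1           # same construction as direct_obs_matrix above
x = np.arange(5.0) * 10.0            # toy state vector [0, 10, 20, 30, 40]
print(H)
# [[1. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0.]]
print(H @ x)                         # -> [ 0. 30.]  (picks elements 0 and 3 of the state)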
30,634 |
def device_quarantine_command(client: Client, args: dict):
    device_id = argToList(args.get('device_id'))
    client.device_quarantine_request(device_id)
    return 'The device has been quarantine'
|
def device_quarantine_command(client: Client, args: dict):
    device_id = argToList(args.get('device_id'))
    client.device_quarantine_request(device_id)
    return f'The device {device_id} has been quarantined successfully.'
|
41,732 |
def load_study(
study_name, # type: str
storage, # type: Union[str, storages.BaseStorage]
sampler=None, # type: samplers.BaseSampler
pruner=None, # type: pruners.BasePruner
):
# type: (...) -> Study
"""Load the existing :class:`~optuna.study.Study` that has the specified name.
Args:
study_name:
Study's name. Each study has a unique name as an identifier.
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for futhre details.
sampler:
A sampler object that implements background algorithm for value suggestion.
If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used
as the default. See also :class:`~optuna.samplers`.
pruner:
A pruner object that decides early stopping of unpromising trials.
If :obj:`None` is specified, :class:`~optuna.pruners.MedianPruner` is used
as the default. See also :class:`~optuna.pruners`.
"""
return Study(study_name=study_name, storage=storage, sampler=sampler, pruner=pruner)
|
def load_study(
study_name, # type: str
storage, # type: Union[str, storages.BaseStorage]
sampler=None, # type: samplers.BaseSampler
pruner=None, # type: pruners.BasePruner
):
# type: (...) -> Study
"""Load the existing :class:`~optuna.study.Study` that has the specified name.
Args:
study_name:
Study's name. Each study has a unique name as an identifier.
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for further details.
sampler:
A sampler object that implements background algorithm for value suggestion.
If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used
as the default. See also :class:`~optuna.samplers`.
pruner:
A pruner object that decides early stopping of unpromising trials.
If :obj:`None` is specified, :class:`~optuna.pruners.MedianPruner` is used
as the default. See also :class:`~optuna.pruners`.
"""
return Study(study_name=study_name, storage=storage, sampler=sampler, pruner=pruner)
|
47,625 |
def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = (
valid_flags
& (flat_anchors[:, 0] >= -allowed_border)
& (flat_anchors[:, 1] >= -allowed_border)
& (flat_anchors[:, 2] < img_w + allowed_border)
& (flat_anchors[:, 3] < img_h + allowed_border)
)
else:
inside_flags = valid_flags
return inside_flags
|
def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flattened anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = (
valid_flags
& (flat_anchors[:, 0] >= -allowed_border)
& (flat_anchors[:, 1] >= -allowed_border)
& (flat_anchors[:, 2] < img_w + allowed_border)
& (flat_anchors[:, 3] < img_h + allowed_border)
)
else:
inside_flags = valid_flags
return inside_flags
|
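A small sketch of the border test above (assumes PyTorch is installed; the tensors are made-up examples, not from the source):

import torch

flat_anchors = torch.tensor([[ 0.,  0., 10., 10.],   # fully inside a 20x20 image
                             [15., 15., 25., 25.]])  # spills past the right/bottom border
valid_flags = torch.tensor([True, True])
img_h, img_w, allowed_border = 20, 20, 0
inside_flags = (valid_flags
                & (flat_anchors[:, 0] >= -allowed_border)
                & (flat_anchors[:, 1] >= -allowed_border)
                & (flat_anchors[:, 2] < img_w + allowed_border)
                & (flat_anchors[:, 3] < img_h + allowed_border))
print(inside_flags)  # -> tensor([ True, False])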
12,989 |
def create_product_variants(variants_data, create_images):
for variant in variants_data:
pk = variant["pk"]
defaults = variant["fields"]
defaults["weight"] = get_weight(defaults["weight"])
product_id = defaults.pop("product")
# We have not created products without images
if product_id not in IMAGES_MAPPING:
continue
defaults["product_id"] = product_id
set_field_as_money(defaults, "price_override")
set_field_as_money(defaults, "cost_price")
is_default_variant = defaults.pop("default", False)
variant, _ = ProductVariant.objects.update_or_create(pk=pk, defaults=defaults)
if is_default_variant:
product = variant.product
product.default_variant = variant
product.save(update_fields=["default_variant", "updated_at"])
if create_images:
image = variant.product.images.filter().first()
VariantImage.objects.create(variant=variant, image=image)
quantity = random.randint(100, 500)
create_stocks(variant, quantity=quantity)
|
def create_product_variants(variants_data, create_images):
for variant in variants_data:
pk = variant["pk"]
defaults = variant["fields"]
defaults["weight"] = get_weight(defaults["weight"])
product_id = defaults.pop("product")
# We have not created products without images
if product_id not in IMAGES_MAPPING:
continue
defaults["product_id"] = product_id
set_field_as_money(defaults, "price_override")
set_field_as_money(defaults, "cost_price")
is_default_variant = defaults.pop("default", False)
variant, _ = ProductVariant.objects.update_or_create(pk=pk, defaults=defaults)
if is_default_variant:
product = variant.product
product.default_variant = variant
product.save(update_fields=["default_variant", "updated_at"])
if create_images:
image = variant.product.images.first()
VariantImage.objects.create(variant=variant, image=image)
quantity = random.randint(100, 500)
create_stocks(variant, quantity=quantity)
|
31,595 |
def create_release_command():
args = demisto.args()
tag_name = args.get('tag_name')
data = {
'tag_name': tag_name,
'name': args.get('name'),
'body': args.get('body'),
'draft': argToBoolean(args.get('draft'))
}
response = http_request('POST', url_suffix=RELEASE_SUFFIX, data=data)
release_url = response.get('html_url')
outputs = {
'id': response.get('id'),
'draft': response.get('draft'),
'html_url': response.get('html_url'),
'url': response.get('url')
}
return_results(CommandResults(
outputs_prefix='GitHub.Release',
outputs=outputs,
outputs_key_field='id',
readable_output=f'Release {tag_name} created successfully for repo {REPOSITORY}: {release_url}',
raw_response=response
))
|
def create_release_command():
args = demisto.args()
tag_name = args.get('tag_name')
data = {
'tag_name': tag_name,
'name': args.get('name'),
'body': args.get('body'),
'draft': argToBoolean(args.get('draft')),
}
response = http_request('POST', url_suffix=RELEASE_SUFFIX, data=data)
release_url = response.get('html_url')
outputs = {
'id': response.get('id'),
'draft': response.get('draft'),
'html_url': response.get('html_url'),
'url': response.get('url')
}
return_results(CommandResults(
outputs_prefix='GitHub.Release',
outputs=outputs,
outputs_key_field='id',
readable_output=f'Release {tag_name} created successfully for repo {REPOSITORY}: {release_url}',
raw_response=response
))
|
10,090 |
def docker_images(args, image):
    """
    :param args: CommonConfig
    :param image: str
    :rtype: list[dict[str, any]]
    """
    try:
        stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
    except SubprocessError as ex:
        if u'no such image' in repr(ex):
            stdout = ''  # podman does not handle this gracefully, exits 125
        else:
            raise ex
    results = [json.loads(line) for line in stdout.splitlines()]
    return results
|
def docker_images(args, image):
    """
    :param args: CommonConfig
    :param image: str
    :rtype: list[dict[str, any]]
    """
    try:
        stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
    except SubprocessError as ex:
        if 'no such image' in ex.stderr:
            stdout = ''  # podman does not handle this gracefully, exits 125
        else:
            raise ex
    results = [json.loads(line) for line in stdout.splitlines()]
    return results
|
429 |
def update_patient(repeater, patient_uuid):
"""
Fetch patient from OpenMRS, submit case update for all mapped case
properties.
.. NOTE:: OpenMRS UUID must be saved to "external_id" case property
"""
if len(repeater.white_listed_case_types) != 1:
raise ConfigurationError(_(
f'{repeater.domain}: {repeater}: Error in settings: Unable to update '
f'patients from OpenMRS unless only one case type is specified.'
))
case_type = repeater.white_listed_case_types[0]
try:
patient = get_patient_by_uuid(repeater.requests, patient_uuid)
except (RequestException, ValueError) as err:
raise OpenmrsException(_(
f'{repeater.domain}: {repeater}: Error fetching Patient '
f'"{patient_uuid}": {err}'
)) from err
case, error = importer_util.lookup_case(
EXTERNAL_ID,
patient_uuid,
repeater.domain,
case_type=case_type,
)
if error == LookupErrors.NotFound:
default_owner: Optional[CommCareUser] = repeater.get_first_user()
case_block = get_addpatient_caseblock(case_type, default_owner, patient, repeater)
elif error == LookupErrors.MultipleResults:
# Multiple cases have been matched to the same patient.
# Could be caused by:
# * The cases were given the same identifier value. It could
# be user error, or case config assumed identifier was
# unique but it wasn't.
# * PatientFinder matched badly.
# * Race condition where a patient was previously added to
# both CommCare and OpenMRS.
raise DuplicateCaseMatch(_(
f'{repeater.domain}: {repeater}: More than one case found '
f'matching unique OpenMRS UUID. case external_id: "{patient_uuid}"'
))
else:
case_block = get_updatepatient_caseblock(case, patient, repeater)
if case_block:
submit_case_blocks(
[case_block.as_text()],
repeater.domain,
xmlns=XMLNS_OPENMRS,
device_id=OPENMRS_ATOM_FEED_DEVICE_ID + repeater.get_id,
)
|
def update_patient(repeater, patient_uuid):
"""
Fetch patient from OpenMRS, submit case update for all mapped case
properties.
.. NOTE:: OpenMRS UUID must be saved to "external_id" case property
"""
if len(repeater.white_listed_case_types) != 1:
raise ConfigurationError(_(
f'{repeater.domain}: {repeater}: Error in settings: Unable to update '
f'patients from OpenMRS unless only one case type is specified.'
))
case_type = repeater.white_listed_case_types[0]
try:
patient = get_patient_by_uuid(repeater.requests, patient_uuid)
except (RequestException, ValueError) as err:
raise OpenmrsException(_(
f'{repeater.domain}: {repeater}: Error fetching Patient '
f'{patient_uuid!r}: {err}'
)) from err
case, error = importer_util.lookup_case(
EXTERNAL_ID,
patient_uuid,
repeater.domain,
case_type=case_type,
)
if error == LookupErrors.NotFound:
default_owner: Optional[CommCareUser] = repeater.get_first_user()
case_block = get_addpatient_caseblock(case_type, default_owner, patient, repeater)
elif error == LookupErrors.MultipleResults:
# Multiple cases have been matched to the same patient.
# Could be caused by:
# * The cases were given the same identifier value. It could
# be user error, or case config assumed identifier was
# unique but it wasn't.
# * PatientFinder matched badly.
# * Race condition where a patient was previously added to
# both CommCare and OpenMRS.
raise DuplicateCaseMatch(_(
f'{repeater.domain}: {repeater}: More than one case found '
f'matching unique OpenMRS UUID. case external_id: "{patient_uuid}"'
))
else:
case_block = get_updatepatient_caseblock(case, patient, repeater)
if case_block:
submit_case_blocks(
[case_block.as_text()],
repeater.domain,
xmlns=XMLNS_OPENMRS,
device_id=OPENMRS_ATOM_FEED_DEVICE_ID + repeater.get_id,
)
|
25,272 |
def identity():
    """Creates a projection that does no transformation
    on the map.
    Returns:
        Projection[any]: A projection that does no transformation.
    """
    return _IdentityProjection()
|
def identity():
    """Creates a projection that does no transformation
    on the map.
    Returns:
        Projection[hazelcast.core.MapEntry]: A projection that does no transformation.
    """
    return _IdentityProjection()
|
52,776 |
def get_email_context(**kwargs):
from pretix.base.models import InvoiceAddress
event = kwargs['event']
if 'position' in kwargs:
kwargs.setdefault("position_or_address", kwargs['position'])
if 'order' in kwargs:
try:
kwargs['invoice_address'] = kwargs['order'].invoice_address
except InvoiceAddress.DoesNotExist:
kwargs['invoice_address'] = InvoiceAddress(order=kwargs['order'])
finally:
kwargs.setdefault("position_or_address", kwargs['invoice_address'])
ctx = {}
for r, val in register_mail_placeholders.send(sender=event):
if not isinstance(val, (list, tuple)):
val = [val]
for v in val:
if all(rp in kwargs for rp in v.required_context):
ctx[v.identifier] = v.render(kwargs)
print(v.identifier, v.render(kwargs))
return ctx
|
def get_email_context(**kwargs):
from pretix.base.models import InvoiceAddress
event = kwargs['event']
if 'position' in kwargs:
kwargs.setdefault("position_or_address", kwargs['position'])
if 'order' in kwargs:
try:
kwargs['invoice_address'] = kwargs['order'].invoice_address
except InvoiceAddress.DoesNotExist:
kwargs['invoice_address'] = InvoiceAddress(order=kwargs['order'])
finally:
kwargs.setdefault("position_or_address", kwargs['invoice_address'])
ctx = {}
for r, val in register_mail_placeholders.send(sender=event):
if not isinstance(val, (list, tuple)):
val = [val]
for v in val:
if all(rp in kwargs for rp in v.required_context):
ctx[v.identifier] = v.render(kwargs)
return ctx
|
8,507 |
def _group_or_org_create(context, data_dict, is_org=False):
model = context['model']
user = context['user']
session = context['session']
data_dict['is_organization'] = is_org
upload = uploader.get_uploader('group')
upload.update_data_dict(data_dict, 'image_url',
'image_upload', 'clear_upload')
# get the schema
group_type = data_dict.get('type', 'group')
group_plugin = lib_plugins.lookup_group_plugin(group_type)
try:
schema = group_plugin.form_to_db_schema_options({
'type': 'create', 'api': 'api_version' in context,
'context': context})
except AttributeError:
schema = group_plugin.form_to_db_schema()
if 'api_version' not in context:
# old plugins do not support passing the schema so we need
# to ensure they still work
try:
group_plugin.check_data_dict(data_dict, schema)
except TypeError:
group_plugin.check_data_dict(data_dict)
data, errors = lib_plugins.plugin_validate(
group_plugin, context, data_dict, schema,
'organization_create' if is_org else 'group_create')
log.debug('group_create validate_errs=%r user=%s group=%s data_dict=%r',
errors, context.get('user'), data_dict.get('name'), data_dict)
if errors:
session.rollback()
raise ValidationError(errors)
group = model_save.group_dict_save(data, context)
# Needed to let extensions know the group id
session.flush()
if is_org:
plugin_type = plugins.IOrganizationController
else:
plugin_type = plugins.IGroupController
for item in plugins.PluginImplementations(plugin_type):
item.create(group)
if is_org:
activity_type = 'new organization'
else:
activity_type = 'new group'
user_id = model.User.by_name(six.ensure_text(user)).id
activity_dict = {
'user_id': user_id,
'object_id': group.id,
'activity_type': activity_type,
}
activity_dict['data'] = {
'group': ckan.lib.dictization.table_dictize(group, context)
}
activity_create_context = {
'model': model,
'user': user,
'defer_commit': True,
'ignore_auth': True,
'session': session
}
logic.get_action('activity_create')(activity_create_context, activity_dict)
upload.upload(uploader.get_max_image_size())
if not context.get('defer_commit'):
model.repo.commit()
context["group"] = group
context["id"] = group.id
# creator of group/org becomes an admin
# this needs to be after the repo.commit or else revisions break
member_dict = {
'id': group.id,
'object': user_id,
'object_type': 'user',
'capacity': 'admin',
}
member_create_context = {
'model': model,
'user': user,
'ignore_auth': True, # we are not a member of the group at this point
'session': session
}
logic.get_action('member_create')(member_create_context, member_dict)
log.debug('Created object %s' % group.name)
return_id_only = context.get('return_id_only', False)
action = 'organization_show' if is_org else 'group_show'
output = context['id'] if return_id_only \
else _get_action(action)(context, {'id': group.id})
return output
|
def _group_or_org_create(context, data_dict, is_org=False):
model = context['model']
user = context['user']
session = context['session']
data_dict['is_organization'] = is_org
upload = uploader.get_uploader('group')
upload.update_data_dict(data_dict, 'image_url',
'image_upload', 'clear_upload')
# get the schema
group_type = data_dict.get('type', 'organization' if is_org else 'group')
group_plugin = lib_plugins.lookup_group_plugin(group_type)
try:
schema = group_plugin.form_to_db_schema_options({
'type': 'create', 'api': 'api_version' in context,
'context': context})
except AttributeError:
schema = group_plugin.form_to_db_schema()
if 'api_version' not in context:
# old plugins do not support passing the schema so we need
# to ensure they still work
try:
group_plugin.check_data_dict(data_dict, schema)
except TypeError:
group_plugin.check_data_dict(data_dict)
data, errors = lib_plugins.plugin_validate(
group_plugin, context, data_dict, schema,
'organization_create' if is_org else 'group_create')
log.debug('group_create validate_errs=%r user=%s group=%s data_dict=%r',
errors, context.get('user'), data_dict.get('name'), data_dict)
if errors:
session.rollback()
raise ValidationError(errors)
group = model_save.group_dict_save(data, context)
# Needed to let extensions know the group id
session.flush()
if is_org:
plugin_type = plugins.IOrganizationController
else:
plugin_type = plugins.IGroupController
for item in plugins.PluginImplementations(plugin_type):
item.create(group)
if is_org:
activity_type = 'new organization'
else:
activity_type = 'new group'
user_id = model.User.by_name(six.ensure_text(user)).id
activity_dict = {
'user_id': user_id,
'object_id': group.id,
'activity_type': activity_type,
}
activity_dict['data'] = {
'group': ckan.lib.dictization.table_dictize(group, context)
}
activity_create_context = {
'model': model,
'user': user,
'defer_commit': True,
'ignore_auth': True,
'session': session
}
logic.get_action('activity_create')(activity_create_context, activity_dict)
upload.upload(uploader.get_max_image_size())
if not context.get('defer_commit'):
model.repo.commit()
context["group"] = group
context["id"] = group.id
# creator of group/org becomes an admin
# this needs to be after the repo.commit or else revisions break
member_dict = {
'id': group.id,
'object': user_id,
'object_type': 'user',
'capacity': 'admin',
}
member_create_context = {
'model': model,
'user': user,
'ignore_auth': True, # we are not a member of the group at this point
'session': session
}
logic.get_action('member_create')(member_create_context, member_dict)
log.debug('Created object %s' % group.name)
return_id_only = context.get('return_id_only', False)
action = 'organization_show' if is_org else 'group_show'
output = context['id'] if return_id_only \
else _get_action(action)(context, {'id': group.id})
return output
|
45,909 |
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as two 1d-convolutions
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = torch.unsqueeze(get_gaussian_kernel2d(kernel_size, sigma), dim=0)
out = kornia.filter2d(input, kernel, border_type)
return out
|
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as composition of two 1d-convolutions.
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = torch.unsqueeze(get_gaussian_kernel2d(kernel_size, sigma), dim=0)
out = kornia.filter2d(input, kernel, border_type)
return out
|
51,548 |
def setup_package():
import os
from datalad.utils import (
on_osx,
on_windows,
)
from datalad.tests import _TEMP_PATHS_GENERATED
if on_osx:
# enforce honoring TMPDIR (see gh-5307)
import tempfile
tempfile.tempdir = os.environ.get('TMPDIR', tempfile.gettempdir())
from datalad import consts
_test_states['env'] = {}
def set_envvar(v, val):
"""Memoize and then set env var"""
_test_states['env'][v] = os.environ.get(v, None)
os.environ[v] = val
_test_states['DATASETS_TOPURL'] = consts.DATASETS_TOPURL
consts.DATASETS_TOPURL = 'https://datasets-tests.datalad.org/'
set_envvar('DATALAD_DATASETS_TOPURL', consts.DATASETS_TOPURL)
from datalad.tests.utils import (
DEFAULT_BRANCH,
DEFAULT_REMOTE,
)
set_envvar("GIT_CONFIG_PARAMETERS",
"'init.defaultBranch={}' 'clone.defaultRemoteName={}'"
.format(DEFAULT_BRANCH, DEFAULT_REMOTE))
gitconfig = """\
[user]
name = DataLad Tester
email = test@example.com
[datalad "log"]
exc = 1
"""
from datalad.support.external_versions import external_versions
if external_versions['cmd:git'] < "2.32":
if on_osx or on_windows:
# This approach of changing HOME (or USERPROFILE respectively)
# has unintended side-effects on:
# 1. Windows, generally, as any subprocess will fail to retrieve
# correct standard locations for configs, caches, etc.
# 2. OSX, most noteably the system-level osxkeychain
# git-credential helper, which will lead to hanging git
# commands if it fails to find the keychain in its expected
# location under $HOME/Library/Keychain.
raise RuntimeError("Refuse to execute datalad tests with git < 2.32"
" on this system, as this may have unintended "
"side-effects.")
# To overcome pybuild overriding HOME but us possibly wanting our
# own HOME where we pre-setup git for testing (name, email)
if 'GIT_HOME' in os.environ:
set_envvar('HOME', os.environ['GIT_HOME'])
else:
# we setup our own new HOME, the BEST and HUGE one
from datalad.utils import make_tempfile
# TODO: split into a function + context manager
with make_tempfile(mkdir=True) as new_home:
pass
for v, val in get_home_envvars(new_home).items():
set_envvar(v, val)
if not os.path.exists(new_home):
os.makedirs(new_home)
with open(os.path.join(new_home, '.gitconfig'), 'w') as f:
f.write(gitconfig)
_TEMP_PATHS_GENERATED.append(new_home)
else:
from datalad.utils import make_tempfile
with make_tempfile(mkdir=True) as cfg_dir:
pass
os.makedirs(cfg_dir, exist_ok=True)
cfg_file = os.path.join(cfg_dir, '.gitconfig')
with open(cfg_file, 'w') as f:
f.write(gitconfig)
set_envvar('GIT_CONFIG_GLOBAL', cfg_file)
_TEMP_PATHS_GENERATED.append(cfg_dir)
# Re-load ConfigManager, since otherwise it won't consider global config
# from new $HOME (see gh-4153
cfg.reload(force=True)
from datalad.interface.common_cfg import compute_cfg_defaults
compute_cfg_defaults()
# datalad.locations.sockets has likely changed. Discard any cached values.
ssh_manager._socket_dir = None
# To overcome pybuild by default defining http{,s}_proxy we would need
# to define them to e.g. empty value so it wouldn't bother touching them.
# But then haskell libraries do not digest empty value nicely, so we just
# pop them out from the environment
for ev in ('http_proxy', 'https_proxy'):
if ev in os.environ and not (os.environ[ev]):
lgr.debug("Removing %s from the environment since it is empty", ev)
os.environ.pop(ev)
# During tests we allow for "insecure" access to local file:// and
# http://localhost URLs since all of them either generated as tests
# fixtures or cloned from trusted sources
from datalad.support.annexrepo import AnnexRepo
AnnexRepo._ALLOW_LOCAL_URLS = True
DATALAD_LOG_LEVEL = os.environ.get('DATALAD_LOG_LEVEL', None)
if DATALAD_LOG_LEVEL is None:
# very very silent. Tests introspecting logs should use
# swallow_logs(new_level=...)
_test_states['loglevel'] = lgr.getEffectiveLevel()
lgr.setLevel(100)
# And we should also set it within environ so underlying commands also stay silent
set_envvar('DATALAD_LOG_LEVEL', '100')
else:
# We are not overriding them, since explicitly were asked to have some log level
_test_states['loglevel'] = None
# Set to non-interactive UI
from datalad.ui import ui
_test_states['ui_backend'] = ui.backend
# obtain() since that one consults for the default value
ui.set_backend(cfg.obtain('datalad.tests.ui.backend'))
# Monkey patch nose so it does not ERROR out whenever code asks for fileno
# of the output. See https://github.com/nose-devs/nose/issues/6
from io import StringIO as OrigStringIO
class StringIO(OrigStringIO):
fileno = lambda self: 1
encoding = None
from nose.ext import dtcompat
from nose.plugins import capture, multiprocess, plugintest
dtcompat.StringIO = StringIO
capture.StringIO = StringIO
multiprocess.StringIO = StringIO
plugintest.StringIO = StringIO
# in order to avoid having to fiddle with rather uncommon
# file:// URLs in the tests, have a standard HTTP server
# that serves an 'httpserve' directory in the test HOME
# the URL will be available from datalad.test_http_server.url
from datalad.tests.utils import HTTPPath
import tempfile
global test_http_server
# Start the server only if not running already
# Relevant: we have test_misc.py:test_test which runs datalad.test but
# not doing teardown, so the original server might never get stopped
if test_http_server is None:
serve_path = tempfile.mkdtemp(
dir=cfg.get("datalad.tests.temp.dir"),
prefix='httpserve',
)
test_http_server = HTTPPath(serve_path)
test_http_server.start()
_TEMP_PATHS_GENERATED.append(serve_path)
if cfg.obtain('datalad.tests.setup.testrepos'):
lgr.debug("Pre-populating testrepos")
from datalad.tests.utils import with_testrepos
with_testrepos()(lambda repo: 1)()
|
def setup_package():
import os
from datalad.utils import (
on_osx,
on_windows,
)
from datalad.tests import _TEMP_PATHS_GENERATED
if on_osx:
# enforce honoring TMPDIR (see gh-5307)
import tempfile
tempfile.tempdir = os.environ.get('TMPDIR', tempfile.gettempdir())
from datalad import consts
_test_states['env'] = {}
def set_envvar(v, val):
"""Memoize and then set env var"""
_test_states['env'][v] = os.environ.get(v, None)
os.environ[v] = val
_test_states['DATASETS_TOPURL'] = consts.DATASETS_TOPURL
consts.DATASETS_TOPURL = 'https://datasets-tests.datalad.org/'
set_envvar('DATALAD_DATASETS_TOPURL', consts.DATASETS_TOPURL)
from datalad.tests.utils import (
DEFAULT_BRANCH,
DEFAULT_REMOTE,
)
set_envvar("GIT_CONFIG_PARAMETERS",
"'init.defaultBranch={}' 'clone.defaultRemoteName={}'"
.format(DEFAULT_BRANCH, DEFAULT_REMOTE))
gitconfig = """\
[user]
name = DataLad Tester
email = test@example.com
[datalad "log"]
exc = 1
"""
from datalad.support.external_versions import external_versions
if external_versions['cmd:git'] < "2.32":
if on_osx or on_windows:
# This approach of changing HOME (or USERPROFILE respectively)
# has unintended side-effects on:
# 1. Windows, generally, as any subprocess will fail to retrieve
# correct standard locations for configs, caches, etc.
# 2. OSX, most noteably the system-level osxkeychain
# git-credential helper, which will lead to hanging git
# commands if it fails to find the keychain in its expected
# location under $HOME/Library/Keychain.
raise RuntimeError("Refuse to execute datalad tests with git < 2.32 "
"on this system, as this may have unintended "
"side-effects.")
# To overcome pybuild overriding HOME but us possibly wanting our
# own HOME where we pre-setup git for testing (name, email)
if 'GIT_HOME' in os.environ:
set_envvar('HOME', os.environ['GIT_HOME'])
else:
# we setup our own new HOME, the BEST and HUGE one
from datalad.utils import make_tempfile
# TODO: split into a function + context manager
with make_tempfile(mkdir=True) as new_home:
pass
for v, val in get_home_envvars(new_home).items():
set_envvar(v, val)
if not os.path.exists(new_home):
os.makedirs(new_home)
with open(os.path.join(new_home, '.gitconfig'), 'w') as f:
f.write(gitconfig)
_TEMP_PATHS_GENERATED.append(new_home)
else:
from datalad.utils import make_tempfile
with make_tempfile(mkdir=True) as cfg_dir:
pass
os.makedirs(cfg_dir, exist_ok=True)
cfg_file = os.path.join(cfg_dir, '.gitconfig')
with open(cfg_file, 'w') as f:
f.write(gitconfig)
set_envvar('GIT_CONFIG_GLOBAL', cfg_file)
_TEMP_PATHS_GENERATED.append(cfg_dir)
# Re-load ConfigManager, since otherwise it won't consider global config
# from new $HOME (see gh-4153
cfg.reload(force=True)
from datalad.interface.common_cfg import compute_cfg_defaults
compute_cfg_defaults()
# datalad.locations.sockets has likely changed. Discard any cached values.
ssh_manager._socket_dir = None
# To overcome pybuild by default defining http{,s}_proxy we would need
# to define them to e.g. empty value so it wouldn't bother touching them.
# But then haskell libraries do not digest empty value nicely, so we just
# pop them out from the environment
for ev in ('http_proxy', 'https_proxy'):
if ev in os.environ and not (os.environ[ev]):
lgr.debug("Removing %s from the environment since it is empty", ev)
os.environ.pop(ev)
# During tests we allow for "insecure" access to local file:// and
# http://localhost URLs since all of them either generated as tests
# fixtures or cloned from trusted sources
from datalad.support.annexrepo import AnnexRepo
AnnexRepo._ALLOW_LOCAL_URLS = True
DATALAD_LOG_LEVEL = os.environ.get('DATALAD_LOG_LEVEL', None)
if DATALAD_LOG_LEVEL is None:
# very very silent. Tests introspecting logs should use
# swallow_logs(new_level=...)
_test_states['loglevel'] = lgr.getEffectiveLevel()
lgr.setLevel(100)
# And we should also set it within environ so underlying commands also stay silent
set_envvar('DATALAD_LOG_LEVEL', '100')
else:
# We are not overriding them, since explicitly were asked to have some log level
_test_states['loglevel'] = None
# Set to non-interactive UI
from datalad.ui import ui
_test_states['ui_backend'] = ui.backend
# obtain() since that one consults for the default value
ui.set_backend(cfg.obtain('datalad.tests.ui.backend'))
# Monkey patch nose so it does not ERROR out whenever code asks for fileno
# of the output. See https://github.com/nose-devs/nose/issues/6
from io import StringIO as OrigStringIO
class StringIO(OrigStringIO):
fileno = lambda self: 1
encoding = None
from nose.ext import dtcompat
from nose.plugins import capture, multiprocess, plugintest
dtcompat.StringIO = StringIO
capture.StringIO = StringIO
multiprocess.StringIO = StringIO
plugintest.StringIO = StringIO
# in order to avoid having to fiddle with rather uncommon
# file:// URLs in the tests, have a standard HTTP server
# that serves an 'httpserve' directory in the test HOME
# the URL will be available from datalad.test_http_server.url
from datalad.tests.utils import HTTPPath
import tempfile
global test_http_server
# Start the server only if not running already
# Relevant: we have test_misc.py:test_test which runs datalad.test but
# not doing teardown, so the original server might never get stopped
if test_http_server is None:
serve_path = tempfile.mkdtemp(
dir=cfg.get("datalad.tests.temp.dir"),
prefix='httpserve',
)
test_http_server = HTTPPath(serve_path)
test_http_server.start()
_TEMP_PATHS_GENERATED.append(serve_path)
if cfg.obtain('datalad.tests.setup.testrepos'):
lgr.debug("Pre-populating testrepos")
from datalad.tests.utils import with_testrepos
with_testrepos()(lambda repo: 1)()
|
31,034 |
def refreshtoken(access_token, refresh_token):
    """Refreshes Access Token"""
    headers = {
        "Authorization": "Bearer " + access_token,
        "Content-Type": "application/json"
    }
    params = {
        "refresh_token": refresh_token
    }
    r = requests.post(REFRESH_TOKEN_URL, json=params, headers=headers)
    return r.json()['access_token'] if r.ok else None
|
def refreshtoken(access_token, refresh_token):
    """Refreshes Access Token"""
    headers = {
        "Authorization": "Bearer " + access_token,
        "Content-Type": "application/json"
    }
    params = {
        "refresh_token": refresh_token
    }
    r = requests.post(REFRESH_TOKEN_URL, json=params, headers=headers)
    return r.json().get('access_token') if r.ok else None
|
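The switch to `.get()` avoids a KeyError when the response body lacks `access_token`; a quick illustration with a plain dict (no HTTP call):

payload = {"token_type": "Bearer"}   # illustrative response body missing 'access_token'
print(payload.get("access_token"))   # -> None (what the modified row returns)
try:
    payload["access_token"]          # what the original row effectively does
except KeyError as err:
    print("KeyError:", err)          # -> KeyError: 'access_token'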
34,899 |
def test_forward_all():
    """test All"""
    np_data = np.random.choice([True, False], size=(5, 7, 11))
    tf.reset_default_graph()
    in_data = tf.placeholder(tf.bool, (5, 7, 11), name="in_data")
    tf.reduce_all(in_data, name="all")
    compare_tf_with_tvm([np_data], ['in_data:0'], 'all:0')
|
def test_forward_all():
    """Test the All operator."""
    np_data = np.random.choice([True, False], size=(5, 7, 11))
    tf.reset_default_graph()
    in_data = tf.placeholder(tf.bool, (5, 7, 11), name="in_data")
    tf.reduce_all(in_data, name="all")
    compare_tf_with_tvm([np_data], ['in_data:0'], 'all:0')
|
28,997 |
def create_default_server_config_file(data_dir) -> None:
    config_file_path = os.path.join(data_dir, SERVER_CONFIG_FILENAME)
    if not os.path.isfile(config_file_path):
        write_default_server_config_to_file(config_file_path)
    return config_file_path
|
def create_default_server_config_file(data_dir: str) -> str:
    config_file_path = os.path.join(data_dir, SERVER_CONFIG_FILENAME)
    if not os.path.isfile(config_file_path):
        write_default_server_config_to_file(config_file_path)
    return config_file_path
|
54,980 |
def observable(me_table, init_term=0, mapping="jordan_wigner"):
r"""Builds the many-body observable whose expectation value can be
measured in PennyLane
This function can be used to build second-quantized operators in the basis
of single-particle states (e.g., HF states) and to transform them into
PennyLane observables. In general, single- and two-particle operators can be
expanded in a truncated set of orbitals that define an active space,
.. math::
\hat A = \sum_{\alpha \leq 2N_\mathrm{docc}} \langle \alpha \vert \hat{\mathcal{A}}
\vert \alpha \rangle ~ \hat{n}_\alpha +
\sum_{\alpha, \beta ~ \in ~ \mathrm{active~space}} \langle \alpha \vert \hat{\mathcal{A}}
\vert \beta \rangle ~ \hat{c}_\alpha^\dagger\hat{c}_\beta,
\hat B = \frac{1}{2} \left\{ \sum_{\alpha, \beta \leq 2N_\mathrm{docc}}
\langle \alpha, \beta \vert \hat{\mathcal{B}} \vert \beta, \alpha \rangle
~ \hat{n}_\alpha \hat{n}_\beta + \sum_{\alpha, \beta, \gamma, \delta ~
\in ~ \mathrm{active~space}} \langle \alpha, \beta \vert \hat{\mathcal{B}}
\vert \gamma, \delta \rangle ~ \hat{c}_{\alpha}^\dagger \hat{c}_{\beta}^\dagger
\hat{c}_{\gamma} \hat{c}_{\delta} \right\}.
In the latter equations :math:`N_\mathrm{docc}` denotes the doubly-occupied orbitals,
if any, not included in the active space and
:math:`\langle \alpha \vert \hat{\mathcal{A}} \vert \beta \rangle` and
:math:`\langle \alpha, \beta \vert\hat{\mathcal{B}} \vert \gamma, \delta \rangle`
are the matrix elements of the one- and two-particle operators
:math:`\hat{\mathcal{A}}` and :math:`\hat{\mathcal{B}}`, respectively.
The function utilizes tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to buil the second-quantized operator and map it to basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is
converted to a a PennyLane observable by the function :func:`~.convert_observable`.
**Example**
>>> s2_me_table, init_term = get_s2_me('h2', './pyscf/sto-3g')
>>> s2_obs = observable(s2_me_table, init_term=init_term)
>>> print(type(s2_obs))
<class 'pennylane.vqe.vqe.Hamiltonian'>
>>> print(s2_obs)
(0.75) [I<Wires = [0]>]
+ (0.375) [Z<Wires = [1]>]
+ (-0.375) [Z<Wires = [0]> Z<Wires = [1]>]
+ (0.125) [Z<Wires = [0]> Z<Wires = [2]>]
+ (0.375) [Z<Wires = [0]>]
+ (-0.125) [Z<Wires = [0]> Z<Wires = [3]>]
+ (-0.125) [Z<Wires = [1]> Z<Wires = [2]>]
+ (0.125) [Z<Wires = [1]> Z<Wires = [3]>]
+ (0.375) [Z<Wires = [2]>]
+ (0.375) [Z<Wires = [3]>]
+ (-0.375) [Z<Wires = [2]> Z<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> X<Wires = [1]> Y<Wires = [2]> X<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> Y<Wires = [1]> X<Wires = [2]> X<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> Y<Wires = [1]> Y<Wires = [2]> Y<Wires = [3]>]
+ (-0.125) [Y<Wires = [0]> X<Wires = [1]> X<Wires = [2]> Y<Wires = [3]>]
+ (-0.125) [X<Wires = [0]> Y<Wires = [1]> Y<Wires = [2]> X<Wires = [3]>]
+ (0.125) [X<Wires = [0]> X<Wires = [1]> X<Wires = [2]> X<Wires = [3]>]
+ (0.125) [X<Wires = [0]> X<Wires = [1]> Y<Wires = [2]> Y<Wires = [3]>]
+ (0.125) [X<Wires = [0]> Y<Wires = [1]> X<Wires = [2]> Y<Wires = [3]>]
Args:
me_table (array[float]): Numpy array with the table of matrix elements.
For a single-particle operator this array will have shape
``(me_table.shape[0], 3)`` with each row containing the indices
:math:`alpha`, :math:`beta` and the matrix element :math:`\alpha \vert
\hat{\mathcal{A}}\vert \beta \rangle`. For a two-particle operator this
array will have shape ``(me_table.shape[0], 5)`` with each row containing
the indices :math:`alpha`, :math:`beta`, :math:`gamma`, :math:`delta` and
the matrix elements :math:`\langle \alpha, \beta \vert \hat{\mathcal{B}}
\vert \gamma, \delta \rangle`.
init_term: the contribution of doubly-occupied orbitals, if any, or other quantity
required to initialize the many-body observable.
mapping (str): specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
"The '{}' transformation is not available. \n "
"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'.".format(
mapping
)
)
sp_op_shape = (3,)
tp_op_shape = (5,)
for i_table in me_table:
if np.array(i_table).shape not in (sp_op_shape, tp_op_shape):
raise ValueError(
"expected entries of 'me_table' to be of shape (3,) or (5,) ; got {}".format(
np.array(i_table).shape
)
)
# Initialize the FermionOperator
mb_obs = FermionOperator() + FermionOperator("") * init_term
for i in me_table:
if i.shape == (5,):
# two-particle operator
mb_obs += FermionOperator(
((int(i[0]), 1), (int(i[1]), 1), (int(i[2]), 0), (int(i[3]), 0)), i[4]
)
elif i.shape == (3,):
# single-particle operator
mb_obs += FermionOperator(((int(i[0]), 1), (int(i[1]), 0)), i[2])
# Map the fermionic to a qubit operator measurable in PennyLane
if mapping.strip().lower() == "bravyi_kitaev":
return structure.convert_observable(bravyi_kitaev(mb_obs))
return structure.convert_observable(jordan_wigner(mb_obs))
|
def observable(me_table, init_term=0, mapping="jordan_wigner"):
r"""Builds the many-body observable whose expectation value can be
measured in PennyLane
This function can be used to build second-quantized operators in the basis
of single-particle states (e.g., HF states) and to transform them into
PennyLane observables. In general, single- and two-particle operators can be
expanded in a truncated set of orbitals that define an active space,
.. math::
\hat A = \sum_{\alpha \leq 2N_\mathrm{docc}} \langle \alpha \vert \hat{\mathcal{A}}
\vert \alpha \rangle ~ \hat{n}_\alpha +
\sum_{\alpha, \beta ~ \in ~ \mathrm{active~space}} \langle \alpha \vert \hat{\mathcal{A}}
\vert \beta \rangle ~ \hat{c}_\alpha^\dagger\hat{c}_\beta,
\hat B = \frac{1}{2} \left\{ \sum_{\alpha, \beta \leq 2N_\mathrm{docc}}
\langle \alpha, \beta \vert \hat{\mathcal{B}} \vert \beta, \alpha \rangle
~ \hat{n}_\alpha \hat{n}_\beta + \sum_{\alpha, \beta, \gamma, \delta ~
\in ~ \mathrm{active~space}} \langle \alpha, \beta \vert \hat{\mathcal{B}}
\vert \gamma, \delta \rangle ~ \hat{c}_{\alpha}^\dagger \hat{c}_{\beta}^\dagger
\hat{c}_{\gamma} \hat{c}_{\delta} \right\}.
In the latter equations :math:`N_\mathrm{docc}` denotes the doubly-occupied orbitals,
if any, not included in the active space and
:math:`\langle \alpha \vert \hat{\mathcal{A}} \vert \beta \rangle` and
:math:`\langle \alpha, \beta \vert\hat{\mathcal{B}} \vert \gamma, \delta \rangle`
are the matrix elements of the one- and two-particle operators
:math:`\hat{\mathcal{A}}` and :math:`\hat{\mathcal{B}}`, respectively.
The function utilizes tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
    to build the second-quantized operator and map it to the basis of Pauli matrices via the
    Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is
    converted to a PennyLane observable by the function :func:`~.convert_observable`.
**Example**
>>> s2_me_table, init_term = get_s2_me('h2', './pyscf/sto-3g')
>>> s2_obs = observable(s2_me_table, init_term=init_term)
>>> print(type(s2_obs))
<class 'pennylane.vqe.vqe.Hamiltonian'>
>>> print(s2_obs)
(0.75) [I<Wires = [0]>]
+ (0.375) [Z<Wires = [1]>]
+ (-0.375) [Z<Wires = [0]> Z<Wires = [1]>]
+ (0.125) [Z<Wires = [0]> Z<Wires = [2]>]
+ (0.375) [Z<Wires = [0]>]
+ (-0.125) [Z<Wires = [0]> Z<Wires = [3]>]
+ (-0.125) [Z<Wires = [1]> Z<Wires = [2]>]
+ (0.125) [Z<Wires = [1]> Z<Wires = [3]>]
+ (0.375) [Z<Wires = [2]>]
+ (0.375) [Z<Wires = [3]>]
+ (-0.375) [Z<Wires = [2]> Z<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> X<Wires = [1]> Y<Wires = [2]> X<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> Y<Wires = [1]> X<Wires = [2]> X<Wires = [3]>]
+ (0.125) [Y<Wires = [0]> Y<Wires = [1]> Y<Wires = [2]> Y<Wires = [3]>]
+ (-0.125) [Y<Wires = [0]> X<Wires = [1]> X<Wires = [2]> Y<Wires = [3]>]
+ (-0.125) [X<Wires = [0]> Y<Wires = [1]> Y<Wires = [2]> X<Wires = [3]>]
+ (0.125) [X<Wires = [0]> X<Wires = [1]> X<Wires = [2]> X<Wires = [3]>]
+ (0.125) [X<Wires = [0]> X<Wires = [1]> Y<Wires = [2]> Y<Wires = [3]>]
+ (0.125) [X<Wires = [0]> Y<Wires = [1]> X<Wires = [2]> Y<Wires = [3]>]
Args:
me_table (array[float]): Numpy array with the table of matrix elements.
For a single-particle operator this array will have shape
``(me_table.shape[0], 3)`` with each row containing the indices
:math:`\alpha`, :math:`\beta` and the matrix element :math:`\langle \alpha \vert
\hat{\mathcal{A}}\vert \beta \rangle`. For a two-particle operator this
array will have shape ``(me_table.shape[0], 5)`` with each row containing
the indices :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\delta` and
the matrix elements :math:`\langle \alpha, \beta \vert \hat{\mathcal{B}}
\vert \gamma, \delta \rangle`.
init_term: the contribution of doubly-occupied orbitals, if any, or other quantity
required to initialize the many-body observable.
mapping (str): specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
"The '{}' transformation is not available. \n "
"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'.".format(
mapping
)
)
sp_op_shape = (3,)
tp_op_shape = (5,)
for i_table in me_table:
if np.array(i_table).shape not in (sp_op_shape, tp_op_shape):
raise ValueError(
"expected entries of 'me_table' to be of shape (3,) or (5,) ; got {}".format(
np.array(i_table).shape
)
)
# Initialize the FermionOperator
mb_obs = FermionOperator() + FermionOperator("") * init_term
for i in me_table:
if i.shape == (5,):
# two-particle operator
mb_obs += FermionOperator(
((int(i[0]), 1), (int(i[1]), 1), (int(i[2]), 0), (int(i[3]), 0)), i[4]
)
elif i.shape == (3,):
# single-particle operator
mb_obs += FermionOperator(((int(i[0]), 1), (int(i[1]), 0)), i[2])
# Map the fermionic to a qubit operator measurable in PennyLane
if mapping.strip().lower() == "bravyi_kitaev":
return structure.convert_observable(bravyi_kitaev(mb_obs))
return structure.convert_observable(jordan_wigner(mb_obs))
|
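As an aside, a minimal usage sketch for the `observable` function in the pair above: it builds a toy one-particle matrix-element table and turns it into a PennyLane observable. It assumes the quantum-chemistry module defining `observable` is importable together with NumPy, OpenFermion and PennyLane; the numerical values are illustrative only.
import numpy as np
# One-particle matrix elements <alpha|A|beta> for a toy system with two spin orbitals;
# each row has the form (alpha, beta, value), i.e. shape (3,).
toy_me_table = np.array([
    [0.0, 0.0, 0.5],
    [1.0, 1.0, 0.5],
])
toy_obs = observable(toy_me_table, init_term=0.0, mapping="jordan_wigner")
print(toy_obs)  # a pennylane.Hamiltonian acting on two qubits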
40,707 |
def precision_recall_curve_compute_fn(y_preds: Any, y_targets: Any):
try:
from sklearn.metrics import precision_recall_curve
except ImportError:
raise RuntimeError("This contrib module requires sklearn to be installed.")
y_true = y_targets.numpy()
y_pred = y_preds.numpy()
return precision_recall_curve(y_true, y_pred)
|
def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor):
try:
from sklearn.metrics import precision_recall_curve
except ImportError:
raise RuntimeError("This contrib module requires sklearn to be installed.")
y_true = y_targets.numpy()
y_pred = y_preds.numpy()
return precision_recall_curve(y_true, y_pred)
|
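A short, hypothetical call of the compute function above, with made-up scores and labels; it assumes torch and scikit-learn are installed and that the tensors live on the CPU.
import torch
y_preds = torch.tensor([0.1, 0.4, 0.35, 0.8])
y_targets = torch.tensor([0, 0, 1, 1])
precision, recall, thresholds = precision_recall_curve_compute_fn(y_preds, y_targets)
print(precision, recall, thresholds)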
4,690 |
def main(path):
"""
Parameters
----------
path : str or None
The path to the font file. If None, use Matplotlib's default font.
"""
if path is None:
path = fm.findfont(fm.FontProperties()) # The default font.
font = FT2Font(path)
# A charmap is a mapping of "character codes" (in the sense of a character
# encoding, e.g. latin-1) to glyph indices (i.e. the internal storage table
# of the font face).
# In FreeType>=2.1, a Unicode charmap (i.e. mapping Unicode codepoints)
# is selected by default. Moreover, recent versions of FreeType will
# automatically synthesize such a charmap if the font does not include one
# (this behavior depends on the font format; for example it is present
# since FreeType 2.0 for Type 1 fonts but only since FreeType 2.8 for
# TrueType (actually, SFNT) fonts).
# The code below (specifically, the ``chr(char_code)`` call) assumes that
# we have indeed selected a Unicode charmap.
codes = font.get_charmap().items()
labelc = ["{:X}".format(i) for i in range(16)]
labelr = ["{:02X}".format(16 * i) for i in range(16)]
chars = [["" for c in range(16)] for r in range(16)]
non_8bit = []
for char_code, glyph_index in codes:
char = chr(char_code)
if char_code >= 256:
non_8bit.append((
str(glyph_index),
char,
unicodedata.name(
char,
f"{char_code:#x} ({font.get_glyph_name(glyph_index)})"),
))
continue
r, c = divmod(char_code, 16)
chars[r][c] = char
if non_8bit:
indices, *_ = zip(*non_8bit)
max_indices_len = max(map(len, indices))
print("The font face contains the following glyphs corresponding to "
"code points beyond 0xff:")
for index, char, name in non_8bit:
print(f"{index:>{max_indices_len}} {char} {name}")
ax = plt.figure(figsize=(8, 4), dpi=120).subplots()
ax.set_title(path)
ax.set_axis_off()
table = ax.table(
cellText=chars,
rowLabels=labelr,
colLabels=labelc,
rowColours=["palegreen"] * 16,
colColours=["palegreen"] * 16,
cellColours=[[".95" for c in range(16)] for r in range(16)],
cellLoc='center',
loc='upper left',
)
for key, cell in table.get_celld().items():
row, col = key
if row > 0 and col > -1: # Beware of table's idiosyncratic indexing...
cell.set_text_props(fontproperties=fm.FontProperties(fname=path))
plt.show()
|
def draw_font_table(path):
"""
Parameters
----------
path : str or None
The path to the font file. If None, use Matplotlib's default font.
"""
if path is None:
path = fm.findfont(fm.FontProperties()) # The default font.
font = FT2Font(path)
# A charmap is a mapping of "character codes" (in the sense of a character
# encoding, e.g. latin-1) to glyph indices (i.e. the internal storage table
# of the font face).
# In FreeType>=2.1, a Unicode charmap (i.e. mapping Unicode codepoints)
# is selected by default. Moreover, recent versions of FreeType will
# automatically synthesize such a charmap if the font does not include one
# (this behavior depends on the font format; for example it is present
# since FreeType 2.0 for Type 1 fonts but only since FreeType 2.8 for
# TrueType (actually, SFNT) fonts).
# The code below (specifically, the ``chr(char_code)`` call) assumes that
# we have indeed selected a Unicode charmap.
codes = font.get_charmap().items()
labelc = ["{:X}".format(i) for i in range(16)]
labelr = ["{:02X}".format(16 * i) for i in range(16)]
chars = [["" for c in range(16)] for r in range(16)]
non_8bit = []
for char_code, glyph_index in codes:
char = chr(char_code)
if char_code >= 256:
non_8bit.append((
str(glyph_index),
char,
unicodedata.name(
char,
f"{char_code:#x} ({font.get_glyph_name(glyph_index)})"),
))
continue
r, c = divmod(char_code, 16)
chars[r][c] = char
if non_8bit:
indices, *_ = zip(*non_8bit)
max_indices_len = max(map(len, indices))
print("The font face contains the following glyphs corresponding to "
"code points beyond 0xff:")
for index, char, name in non_8bit:
print(f"{index:>{max_indices_len}} {char} {name}")
ax = plt.figure(figsize=(8, 4), dpi=120).subplots()
ax.set_title(path)
ax.set_axis_off()
table = ax.table(
cellText=chars,
rowLabels=labelr,
colLabels=labelc,
rowColours=["palegreen"] * 16,
colColours=["palegreen"] * 16,
cellColours=[[".95" for c in range(16)] for r in range(16)],
cellLoc='center',
loc='upper left',
)
for key, cell in table.get_celld().items():
row, col = key
if row > 0 and col > -1: # Beware of table's idiosyncratic indexing...
cell.set_text_props(fontproperties=fm.FontProperties(fname=path))
plt.show()
|
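A hypothetical invocation of the renamed helper above. It assumes the module-level imports the function relies on (`matplotlib.font_manager as fm`, `FT2Font`, `plt`, `unicodedata`) are in place; passing None renders Matplotlib's default font.
if __name__ == "__main__":
    draw_font_table(None)  # show the 8-bit glyph table of the default font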
58,320 |
def transform(curve, tf):
"""Transforms the curve by the homogeneous transformation matrix tf"""
def to_point(p):
return np.array([[p.real], [p.imag], [1.0]])
def to_vector(z):
return np.array([[z.real], [z.imag], [0.0]])
def to_complex(v):
return v.item(0) + 1j * v.item(1)
if isinstance(curve, Path):
return Path(*[transform(segment, tf) for segment in curve])
elif is_bezier_segment(curve):
return bpoints2bezier([to_complex(tf.dot(to_point(p)))
for p in curve.bpoints()])
elif isinstance(curve, Arc):
new_start = to_complex(tf.dot(to_point(curve.start)))
new_end = to_complex(tf.dot(to_point(curve.end)))
# Based on https://math.stackexchange.com/questions/2349726/compute-the-major-and-minor-axis-of-an-ellipse-after-linearly-transforming-it
rx2 = curve.radius.real ** 2
ry2 = curve.radius.imag ** 2
Q = np.array([[1/rx2, 0], [0, 1/ry2]])
invT = np.linalg.inv(tf[:2,:2])
D = invT.T @ Q @ invT
eigvals = np.linalg.eigvals(D)
rx = 1 / np.sqrt(eigvals[0])
ry = 1 / np.sqrt(eigvals[1])
new_radius = complex(rx, ry)
if new_radius.real == 0 or new_radius.imag == 0 :
return Line(new_start, new_end)
else :
return Arc(new_start, radius=new_radius, rotation=curve.rotation,
large_arc=curve.large_arc, sweep=curve.sweep, end=new_end)
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
|
def transform(curve, tf):
"""Transforms the curve by the homogeneous transformation matrix tf"""
def to_point(p):
return np.array([[p.real], [p.imag], [1.0]])
def to_vector(z):
return np.array([[z.real], [z.imag], [0.0]])
def to_complex(v):
return v.item(0) + 1j * v.item(1)
if isinstance(curve, Path):
return Path(*[transform(segment, tf) for segment in curve])
elif is_bezier_segment(curve):
return bpoints2bezier([to_complex(tf.dot(to_point(p)))
for p in curve.bpoints()])
elif isinstance(curve, Arc):
new_start = to_complex(tf.dot(to_point(curve.start)))
new_end = to_complex(tf.dot(to_point(curve.end)))
# Based on https://math.stackexchange.com/questions/2349726/compute-the-major-and-minor-axis-of-an-ellipse-after-linearly-transforming-it
rx2 = curve.radius.real ** 2
ry2 = curve.radius.imag ** 2
Q = np.array([[1/rx2, 0], [0, 1/ry2]])
invT = np.linalg.inv(tf[:2,:2])
D = invT.T @ Q @ invT
eigvals = np.linalg.eigvals(D)
rx = 1 / np.sqrt(eigvals[0])
ry = 1 / np.sqrt(eigvals[1])
new_radius = complex(rx, ry)
if new_radius.real == 0 or new_radius.imag == 0 :
return Line(new_start, new_end)
else :
return Arc(new_start, radius=new_radius, rotation=curve.rotation,
large_arc=curve.large_arc, sweep=curve.sweep, end=new_end,
autoscale_radius=False)
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
|
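A small sketch of `transform` applied to a straight segment, assuming the surrounding svgpathtools-style module provides `Line` and imports NumPy as `np`; the matrix is a plain translation by (1, 2).
tf = np.array([[1.0, 0.0, 1.0],
               [0.0, 1.0, 2.0],
               [0.0, 0.0, 1.0]])
moved = transform(Line(0 + 0j, 3 + 4j), tf)
print(moved.start, moved.end)  # (1+2j) (4+6j)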
58,750 |
def from_tuple_type(ty, expr):
"""Convert an expression with the given type into a sequence of expressions.
Each expressions maps to a field of the tuple or nested tuples in linear
order.
Parameters
----------
ty: tvm.Type
The type to unpack.
expr: The expression from which to extract each sub-field.
Returns
-------
result: List[tvm.relay.Expr]
The list of sub-expressions.
"""
return _make.FromTupleType(ty, expr)
|
def from_tuple_type(ty, expr):
"""Convert an expression with the given type into a sequence of expressions.
Each expressions maps to a field of the tuple or nested tuples in linear
order.
Parameters
----------
ty: tvm.Type
The type to unpack.
expr: tvm.relay.Expr
The expression from which to extract each sub-field.
Returns
-------
result: List[tvm.relay.Expr]
The list of sub-expressions.
"""
return _make.FromTupleType(ty, expr)
|
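A hedged sketch of how `from_tuple_type` might be called, assuming a TVM build where the Relay constructors below behave as documented; the tensor shapes are arbitrary.
from tvm import relay
ty = relay.TupleType([relay.TensorType((1,), "float32"),
                      relay.TensorType((4,), "float32")])
x = relay.var("x", ty)
fields = from_tuple_type(ty, x)  # one expression per tuple field, in linear order
print(len(fields))  # 2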
37,107 |
def circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
output=None,
interactive=False,
line_length=None,
plot_barriers=True,
reverse_bits=False,
justify=None,
vertical_compression='medium',
idle_wires=True,
with_layout=True,
fold=None):
"""Draw a quantum circuit to different formats (set by output parameter):
**text**: ASCII art TextDrawing that can be printed in the console.
**latex**: high-quality images compiled via latex.
**latex_source**: raw uncompiled latex output.
**matplotlib**: images with color rendered purely in Python.
Args:
circuit (QuantumCircuit): the quantum circuit to draw
scale (float): scale of image to draw (shrink if < 1)
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file.
This option is only used by the ``mpl`` output type. If a str is
passed in that is the path to a json file which contains that will
be open, parsed, and then used just as the input dict. See:
:ref:`Style Dict Doc <style-dict-doc>` for more information on the
contents.
output (str): Select the output method to use for drawing the circuit.
Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``.
By default the `'text`' drawer is used unless a user config file
has an alternative backend set as the default. If the output kwarg
is set, that backend will always be used over the default in a user
config file.
interactive (bool): when set true show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored.
        line_length (int): Deprecated, see `fold` which supersedes this option.
            Sets the length of the lines generated by `text` output type.
            This is useful when the drawing does not fit in the console. If None
(default), it will try to guess the console width using
``shutil.get_terminal_size()``. However, if you're running in
jupyter the default line length is set to 80 characters. If you
don't want pagination at all, set ``line_length=-1``.
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): Options are ``left``, ``right`` or ``none``, if
anything else is supplied it defaults to left justified. It refers
to where gates should be placed in the output circuit if there is
an option. ``none`` results in each gate being placed in its own
column.
vertical_compression (string): ``high``, ``medium`` or ``low``. It
merges the lines generated by the ``text`` output so the drawing
will take less vertical room. Default is ``medium``. Only used by
the ``text`` output, will be silently ignored otherwise.
idle_wires (bool): Include idle wires (wires with no circuit elements)
in output visualization. Default is True.
with_layout (bool): Include layout information, with labels on the
physical layout.
fold (int): Sets pagination. It can be disabled using -1.
            In `text`, sets the length of the lines. This is useful when the
            drawing does not fit in the console. If None (default), it will try
            to guess the console width using ``shutil.get_terminal_size()``.
            However, if running in jupyter, the default line length is set to
            80 characters. In ``mpl``, it is the number of operations before
            folding. Default is 25.
Returns:
:class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
:class:`TextDrawing`:
* `PIL.Image` (output='latex')
an in-memory representation of the image of the circuit diagram.
* `matplotlib.figure.Figure` (output='mpl')
a matplotlib figure object for the circuit diagram.
* `str` (output='latex_source')
The LaTeX source code for visualizing the circuit diagram.
* `TextDrawing` (output='text')
A drawing that can be printed as ascii art
Raises:
VisualizationError: when an invalid output method is selected
ImportError: when the output methods requieres non-installed libraries.
.. _style-dict-doc:
**Style Dict Details**
The style dict kwarg contains numerous options that define the style of the
output circuit visualization. The style dict is only used by the ``mpl``
output. The options available in the style dict are defined below:
Args:
textcolor (str): The color code to use for text. Defaults to
`'#000000'`
subtextcolor (str): The color code to use for subtext. Defaults to
`'#000000'`
linecolor (str): The color code to use for lines. Defaults to
`'#000000'`
creglinecolor (str): The color code to use for classical register
lines. Defaults to `'#778899'`
gatetextcolor (str): The color code to use for gate text. Defaults to
`'#000000'`
gatefacecolor (str): The color code to use for gates. Defaults to
`'#ffffff'`
barrierfacecolor (str): The color code to use for barriers. Defaults to
`'#bdbdbd'`
backgroundcolor (str): The color code to use for the background.
Defaults to `'#ffffff'`
fontsize (int): The font size to use for text. Defaults to 13
subfontsize (int): The font size to use for subtext. Defaults to 8
displaytext (dict): A dictionary of the text to use for each element
type in the output visualization. The default values are::
{
'id': 'id',
'u0': 'U_0',
'u1': 'U_1',
'u2': 'U_2',
'u3': 'U_3',
'x': 'X',
'y': 'Y',
'z': 'Z',
'h': 'H',
's': 'S',
'sdg': 'S^\\dagger',
't': 'T',
'tdg': 'T^\\dagger',
'rx': 'R_x',
'ry': 'R_y',
'rz': 'R_z',
'reset': '\\left|0\\right\\rangle'
}
You must specify all the necessary values if using this. There is
no provision for passing an incomplete dict in.
displaycolor (dict): The color codes to use for each circuit element.
The default values are::
{
'id': '#F0E442',
'u0': '#E7AB3B',
'u1': '#E7AB3B',
'u2': '#E7AB3B',
'u3': '#E7AB3B',
'x': '#58C698',
'y': '#58C698',
'z': '#58C698',
'h': '#70B7EB',
's': '#E0722D',
'sdg': '#E0722D',
't': '#E0722D',
'tdg': '#E0722D',
'rx': '#ffffff',
'ry': '#ffffff',
'rz': '#ffffff',
'reset': '#D188B4',
'target': '#70B7EB',
'meas': '#D188B4'
}
Also, just like `displaytext` there is no provision for an
incomplete dict passed in.
latexdrawerstyle (bool): When set to True enable latex mode which will
draw gates like the `latex` output modes.
usepiformat (bool): When set to True use radians for output
fold (int): The number of circuit elements to fold the circuit at.
Defaults to 20
cregbundle (bool): If set True bundle classical registers
showindex (bool): If set True draw an index.
compress (bool): If set True draw a compressed circuit
figwidth (int): The maximum width (in inches) for the output figure.
dpi (int): The DPI to use for the output image. Defaults to 150
margin (list): A list of margin values to adjust spacing around output
image. Takes a list of 4 ints: [x left, x right, y bottom, y top].
creglinestyle (str): The style of line to use for classical registers.
Choices are `'solid'`, `'doublet'`, or any valid matplotlib
`linestyle` kwarg value. Defaults to `doublet`
"""
image = None
config = user_config.get_config()
# Get default from config file else use text
default_output = 'text'
if config:
default_output = config.get('circuit_drawer', 'text')
if default_output == 'auto':
if _matplotlib.HAS_MATPLOTLIB:
default_output = 'mpl'
else:
default_output = 'text'
if output is None:
output = default_output
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
line_length=line_length,
reverse_bits=reverse_bits,
plot_barriers=plot_barriers,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image
|
def circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
output=None,
interactive=False,
line_length=None,
plot_barriers=True,
reverse_bits=False,
justify=None,
vertical_compression='medium',
idle_wires=True,
with_layout=True,
fold=None):
"""Draw a quantum circuit to different formats (set by output parameter):
**text**: ASCII art TextDrawing that can be printed in the console.
**latex**: high-quality images compiled via latex.
**latex_source**: raw uncompiled latex output.
**matplotlib**: images with color rendered purely in Python.
Args:
circuit (QuantumCircuit): the quantum circuit to draw
scale (float): scale of image to draw (shrink if < 1)
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file.
This option is only used by the ``mpl`` output type. If a str is
passed in that is the path to a json file which contains that will
be open, parsed, and then used just as the input dict. See:
:ref:`Style Dict Doc <style-dict-doc>` for more information on the
contents.
output (str): Select the output method to use for drawing the circuit.
Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``.
By default the `'text`' drawer is used unless a user config file
has an alternative backend set as the default. If the output kwarg
is set, that backend will always be used over the default in a user
config file.
interactive (bool): when set true show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored.
        line_length (int): Deprecated, see `fold` which supersedes this option.
            Sets the length of the lines generated by `text` output type.
            This is useful when the drawing does not fit in the console. If None
(default), it will try to guess the console width using
``shutil.get_terminal_size()``. However, if you're running in
jupyter the default line length is set to 80 characters. If you
don't want pagination at all, set ``line_length=-1``.
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): Options are ``left``, ``right`` or ``none``, if
anything else is supplied it defaults to left justified. It refers
to where gates should be placed in the output circuit if there is
an option. ``none`` results in each gate being placed in its own
column.
vertical_compression (string): ``high``, ``medium`` or ``low``. It
merges the lines generated by the ``text`` output so the drawing
will take less vertical room. Default is ``medium``. Only used by
the ``text`` output, will be silently ignored otherwise.
idle_wires (bool): Include idle wires (wires with no circuit elements)
in output visualization. Default is True.
with_layout (bool): Include layout information, with labels on the
physical layout.
fold (int): Sets pagination. It can be disabled using -1.
            In `text`, sets the length of the lines. This is useful when the
            drawing does not fit in the console. If None (default), it will try
            to guess the console width using ``shutil.get_terminal_size()``.
            However, if running in jupyter, the default line length is set to
            80 characters. In ``mpl``, it is the number of operations before
            folding. Default is 25.
Returns:
:class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
:class:`TextDrawing`:
* `PIL.Image` (output='latex')
an in-memory representation of the image of the circuit diagram.
* `matplotlib.figure.Figure` (output='mpl')
a matplotlib figure object for the circuit diagram.
* `str` (output='latex_source')
The LaTeX source code for visualizing the circuit diagram.
* `TextDrawing` (output='text')
A drawing that can be printed as ascii art
Raises:
VisualizationError: when an invalid output method is selected
ImportError: when the output methods requires non-installed libraries.
.. _style-dict-doc:
**Style Dict Details**
The style dict kwarg contains numerous options that define the style of the
output circuit visualization. The style dict is only used by the ``mpl``
output. The options available in the style dict are defined below:
Args:
textcolor (str): The color code to use for text. Defaults to
`'#000000'`
subtextcolor (str): The color code to use for subtext. Defaults to
`'#000000'`
linecolor (str): The color code to use for lines. Defaults to
`'#000000'`
creglinecolor (str): The color code to use for classical register
lines. Defaults to `'#778899'`
gatetextcolor (str): The color code to use for gate text. Defaults to
`'#000000'`
gatefacecolor (str): The color code to use for gates. Defaults to
`'#ffffff'`
barrierfacecolor (str): The color code to use for barriers. Defaults to
`'#bdbdbd'`
backgroundcolor (str): The color code to use for the background.
Defaults to `'#ffffff'`
fontsize (int): The font size to use for text. Defaults to 13
subfontsize (int): The font size to use for subtext. Defaults to 8
displaytext (dict): A dictionary of the text to use for each element
type in the output visualization. The default values are::
{
'id': 'id',
'u0': 'U_0',
'u1': 'U_1',
'u2': 'U_2',
'u3': 'U_3',
'x': 'X',
'y': 'Y',
'z': 'Z',
'h': 'H',
's': 'S',
'sdg': 'S^\\dagger',
't': 'T',
'tdg': 'T^\\dagger',
'rx': 'R_x',
'ry': 'R_y',
'rz': 'R_z',
'reset': '\\left|0\\right\\rangle'
}
You must specify all the necessary values if using this. There is
no provision for passing an incomplete dict in.
displaycolor (dict): The color codes to use for each circuit element.
The default values are::
{
'id': '#F0E442',
'u0': '#E7AB3B',
'u1': '#E7AB3B',
'u2': '#E7AB3B',
'u3': '#E7AB3B',
'x': '#58C698',
'y': '#58C698',
'z': '#58C698',
'h': '#70B7EB',
's': '#E0722D',
'sdg': '#E0722D',
't': '#E0722D',
'tdg': '#E0722D',
'rx': '#ffffff',
'ry': '#ffffff',
'rz': '#ffffff',
'reset': '#D188B4',
'target': '#70B7EB',
'meas': '#D188B4'
}
Also, just like `displaytext` there is no provision for an
incomplete dict passed in.
latexdrawerstyle (bool): When set to True enable latex mode which will
draw gates like the `latex` output modes.
usepiformat (bool): When set to True use radians for output
fold (int): The number of circuit elements to fold the circuit at.
Defaults to 20
cregbundle (bool): If set True bundle classical registers
showindex (bool): If set True draw an index.
compress (bool): If set True draw a compressed circuit
figwidth (int): The maximum width (in inches) for the output figure.
dpi (int): The DPI to use for the output image. Defaults to 150
margin (list): A list of margin values to adjust spacing around output
image. Takes a list of 4 ints: [x left, x right, y bottom, y top].
creglinestyle (str): The style of line to use for classical registers.
Choices are `'solid'`, `'doublet'`, or any valid matplotlib
`linestyle` kwarg value. Defaults to `doublet`
"""
image = None
config = user_config.get_config()
# Get default from config file else use text
default_output = 'text'
if config:
default_output = config.get('circuit_drawer', 'text')
if default_output == 'auto':
if _matplotlib.HAS_MATPLOTLIB:
default_output = 'mpl'
else:
default_output = 'text'
if output is None:
output = default_output
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
line_length=line_length,
reverse_bits=reverse_bits,
plot_barriers=plot_barriers,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image
|
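A brief usage sketch for the drawer above, using the Qiskit Terra API of the same era; the Bell-pair circuit is made up and only the `text` backend is exercised.
from qiskit import QuantumCircuit
bell = QuantumCircuit(2, 2)
bell.h(0)
bell.cx(0, 1)
bell.measure([0, 1], [0, 1])
print(circuit_drawer(bell, output='text'))  # ASCII-art TextDrawing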
57,689 |
def http_request(method, url_suffix, json=None, wait=0, retries=0):
if method == 'GET':
headers = {} # type: Dict[str, str]
elif method == 'POST':
headers = {
'API-Key': APIKEY,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
r = requests.request(
method,
BASE_URL + url_suffix,
data=json,
headers=headers,
verify=USE_SSL
)
if r.headers.get('X-Rate-Limit-Remaining') == '10':
return_warning(f'Your rate limit has reached to 10. The rate limit will reset at '
f'{r.headers.get("X-Rate-Limit-Reset")}')
if r.status_code != 200:
if r.status_code == 429:
if retries <= 0:
# Error in API call to URLScan.io [429] - Too Many Requests
return_error('API rate limit reached [%d] - %s.\nUse the retries and wait arguments when submitting '
'multiple URls' % (r.status_code, r.reason))
else:
time.sleep(wait)
return http_request(method, url_suffix, json, wait, retries - 1)
response_json = r.json()
error_description = response_json.get('description')
should_continue_on_blacklisted_urls = demisto.args().get('continue_on_blacklisted_urls')
if should_continue_on_blacklisted_urls and error_description == BLACKLISTED_URL_ERROR_MESSAGE:
response_json['url_is_blacklisted'] = True
requested_url = JSON.loads(json)['url']
blacklisted_message = 'The URL {} is blacklisted, no results will be returned for it.'.format(requested_url)
demisto.results(blacklisted_message)
return response_json
return_error('Error in API call to URLScan.io [%d] - %s' % (r.status_code, r.reason))
return r.json()
|
def http_request(method, url_suffix, json=None, wait=0, retries=0):
if method == 'GET':
headers = {} # type: Dict[str, str]
elif method == 'POST':
headers = {
'API-Key': APIKEY,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
r = requests.request(
method,
BASE_URL + url_suffix,
data=json,
headers=headers,
verify=USE_SSL
)
    rate_limit_remaining = int(r.headers.get('X-Rate-Limit-Remaining', 99))
if rate_limit_remaining < 10:
        return_warning(f'Your remaining rate limit is {rate_limit_remaining} and is about to be exhausted. The rate limit will reset at '
f'{r.headers.get("X-Rate-Limit-Reset")}')
if r.status_code != 200:
if r.status_code == 429:
if retries <= 0:
# Error in API call to URLScan.io [429] - Too Many Requests
return_error('API rate limit reached [%d] - %s.\nUse the retries and wait arguments when submitting '
'multiple URls' % (r.status_code, r.reason))
else:
time.sleep(wait)
return http_request(method, url_suffix, json, wait, retries - 1)
response_json = r.json()
error_description = response_json.get('description')
should_continue_on_blacklisted_urls = demisto.args().get('continue_on_blacklisted_urls')
if should_continue_on_blacklisted_urls and error_description == BLACKLISTED_URL_ERROR_MESSAGE:
response_json['url_is_blacklisted'] = True
requested_url = JSON.loads(json)['url']
blacklisted_message = 'The URL {} is blacklisted, no results will be returned for it.'.format(requested_url)
demisto.results(blacklisted_message)
return response_json
return_error('Error in API call to URLScan.io [%d] - %s' % (r.status_code, r.reason))
return r.json()
|
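A hypothetical call of the integration helper above. It assumes the module-level globals it reads (`BASE_URL`, `APIKEY`, `USE_SSL`, `demisto`) are configured by the integration, and the URL suffix shown is illustrative, not a documented endpoint.
# Poll a submission result, sleeping 10 seconds between up to 3 retries on HTTP 429.
result = http_request('GET', 'result/example-scan-id/', wait=10, retries=3)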
13,912 |
def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
            # START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
            # branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
|
def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Tuple[Callable[[int], bool], Callable[[int], bool]]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
            # START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
            # branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
|
27,979 |
def get_file_content_hash(file_path):
"""
Return the file content hash for a file.
"""
with open(file_path, 'rb') as content:
hasher = hashlib.sha256()
hasher.update(content.read())
return hasher.hexdigest()
|
def get_file_content_hash(file_path):
"""
Return the file content hash for a file.
"""
with open(file_path, 'rb', encoding='utf-8', errors='ignore') as content:
hasher = hashlib.sha256()
hasher.update(content.read())
return hasher.hexdigest()
|
31,695 |
def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
Returns the results of the Parse IOCs from URL API call
Args:
client: IOCParser client to use
args: All command arguments, ulr, limit and keys (if specified)
Returns:
CommandResults object containing the results of the parse from url as
returned from the API and its readable output
"""
url = args.get('url')
keys = argToList(args.get('keys'))
limit = args.get('limit')
if not keys:
keys = KEYS
keys = list_to_upper_case(keys)
if not url:
raise ValueError('url not specified')
response = client.ioc_from_url(url)
response_data = process_response(response, keys, limit)
command_results = []
outputs = {'url': url, 'Results': []}
for key, values in response_data.items():
for value in values:
outputs['Results'].append({'type': key, 'value': value})
for ioc_type, iocs in response_data.items():
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type),
outputs_prefix=f'IOCParser.parseFromUrl',
outputs=outputs
))
command_results.append(CommandResults(
raw_response=response_data
))
return command_results
|
def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
Returns the results of the Parse IOCs from URL API call
Args:
client: IOCParser client to use
args: All command arguments, ulr, limit and keys (if specified)
Returns:
CommandResults object containing the results of the parse from url as
returned from the API and its readable output
"""
url = args.get('url')
keys = argToList(args.get('keys'))
limit = arg_to_number(args.get('limit'))
if not keys:
keys = KEYS
keys = list_to_upper_case(keys)
if not url:
raise ValueError('url not specified')
response = client.ioc_from_url(url)
response_data = process_response(response, keys, limit)
command_results = []
outputs = {'url': url, 'Results': []}
for key, values in response_data.items():
for value in values:
outputs['Results'].append({'type': key, 'value': value})
for ioc_type, iocs in response_data.items():
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type),
outputs_prefix=f'IOCParser.parseFromUrl',
outputs=outputs
))
command_results.append(CommandResults(
raw_response=response_data
))
return command_results
|
18,203 |
def _commands(parser, args):
    """This is the 'regular' command, which can be called multiple times.
See commands() below for --update-completion handling.
"""
formatter = formatters[args.format]
# check header first so we don't open out files unnecessarily
if args.header and not os.path.exists(args.header):
tty.die("No such file: '%s'" % args.header)
# if we're updating an existing file, only write output if a command
# or the header is newer than the file.
if args.update:
if os.path.exists(args.update):
files = [
spack.cmd.get_module(command).__file__.rstrip('c') # pyc -> py
for command in spack.cmd.all_commands()]
if args.header:
files.append(args.header)
last_update = os.path.getmtime(args.update)
if not any(os.path.getmtime(f) > last_update for f in files):
tty.msg('File is up to date: %s' % args.update)
return
tty.msg('Updating file: %s' % args.update)
with open(args.update, 'w') as f:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)
|
def _commands(parser, args):
    """This is the 'regular' command, which can be called multiple times.
See ``commands()`` below for ``--update-completion`` handling.
"""
formatter = formatters[args.format]
# check header first so we don't open out files unnecessarily
if args.header and not os.path.exists(args.header):
tty.die("No such file: '%s'" % args.header)
# if we're updating an existing file, only write output if a command
# or the header is newer than the file.
if args.update:
if os.path.exists(args.update):
files = [
spack.cmd.get_module(command).__file__.rstrip('c') # pyc -> py
for command in spack.cmd.all_commands()]
if args.header:
files.append(args.header)
last_update = os.path.getmtime(args.update)
if not any(os.path.getmtime(f) > last_update for f in files):
tty.msg('File is up to date: %s' % args.update)
return
tty.msg('Updating file: %s' % args.update)
with open(args.update, 'w') as f:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)
|
48,980 |
def inspect_with_medical_record_number_custom_regex_detector(
project,
content_string,
):
"""Uses the Data Loss Prevention API to analyze string with medical record
number custom regex detector
Args:
project: The Google Cloud project id to use as a parent resource.
content_string: The string to inspect.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp_v2.DlpServiceClient()
# Construct a custom regex detector info type called "C_MRN",
# with ###-#-##### pattern, where each # represents a digit from 1 to 9.
# The detector has a detection likelihood of POSSIBLE.
custom_info_types = [
{
"info_type": {"name": "C_MRN"},
"regex": {"pattern": "[1-9]{3}-[1-9]{1}-[1-9]{5}"},
"likelihood": "POSSIBLE",
}
]
# Construct the configuration dictionary with the custom regex info type.
inspect_config = {
"custom_info_types": custom_info_types,
}
# Construct the `item`.
item = {"value": content_string}
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Call the API.
response = dlp.inspect_content(parent, inspect_config, item)
# Print out the results.
if response.result.findings:
for finding in response.result.findings:
try:
if finding.quote:
print("Quote: {}".format(finding.quote))
except AttributeError:
pass
print("Info type: {}".format(finding.info_type.name))
print("Likelihood: {}".format(finding.likelihood))
else:
print("No findings.")
|
def inspect_with_medical_record_number_custom_regex_detector(
project,
content_string,
):
"""Uses the Data Loss Prevention API to analyze string with medical record
number custom regex detector
Args:
project: The Google Cloud project id to use as a parent resource.
content_string: The string to inspect.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp_v2.DlpServiceClient()
# Construct a custom regex detector info type called "C_MRN",
# with ###-#-##### pattern, where each # represents a digit from 1 to 9.
# The detector has a detection likelihood of POSSIBLE.
custom_info_types = [
{
"info_type": {"name": "C_MRN"},
"regex": {"pattern": "[1-9]{3}-[1-9]{1}-[1-9]{5}"},
"likelihood": "POSSIBLE",
}
]
# Construct the configuration dictionary with the custom regex info type.
inspect_config = {
"custom_info_types": custom_info_types,
}
# Construct the `item`.
item = {"value": content_string}
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Call the API.
response = dlp.inspect_content(parent, inspect_config, item)
# Print out the results.
if response.result.findings:
for finding in response.result.findings:
try:
if finding.quote:
print(f"Quote: {finding.quote}")
except AttributeError:
pass
print(f"Info type: {finding.info_type.name}")
print(f"Likelihood: {finding.likelihood}")
else:
print("No findings.")
|
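A hypothetical call of the DLP sample above; the project id and input string are made up, and google-cloud-dlp must be installed and authenticated for the request to succeed.
inspect_with_medical_record_number_custom_regex_detector(
    "my-gcp-project",                       # hypothetical project id
    "Patient record 444-5-22222 on file.",  # matches the C_MRN pattern
)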
7,652 |
def get_contributions_for_person(event, person, onlyspeakers=False):
"""Get all contributions for an event person
If onlyspeakers is true, then only contributions where the person is speaker
are returned
"""
if onlyspeakers:
cl_join = db.and_(ContributionPersonLink.is_speaker,
ContributionPersonLink.person_id == person.id,
ContributionPersonLink.contribution_id == Contribution.id)
else:
cl_join = db.and_(ContributionPersonLink.person_id == person.id,
ContributionPersonLink.contribution_id == Contribution.id)
return (Contribution.query.filter(Contribution.event_id == event.id)
.filter(Contribution.is_deleted is not False)
.join(ContributionPersonLink, cl_join).all())
|
def get_contributions_for_person(event, person, onlyspeakers=False):
"""Get all contributions for an event person.
If onlyspeakers is true, then only contributions where the person is speaker
are returned
"""
if onlyspeakers:
cl_join = db.and_(ContributionPersonLink.is_speaker,
ContributionPersonLink.person_id == person.id,
ContributionPersonLink.contribution_id == Contribution.id)
else:
cl_join = db.and_(ContributionPersonLink.person_id == person.id,
ContributionPersonLink.contribution_id == Contribution.id)
return (Contribution.query.filter(Contribution.event_id == event.id)
.filter(Contribution.is_deleted is not False)
.join(ContributionPersonLink, cl_join).all())
|
19,738 |
def orgname_option_or_argument(*, required):
"Create decorator that allows org_name to be an option or an argument"
def decorator(func):
if required:
message = "One of ORGNAME (see above) or --org is required."
else:
message = "By default, runs against the current default org."
opt_version = click.option(
"--org",
callback=set_org_name(
False
), # never required because arg-version may be specified
expose_value=False,
help=f"Alternate way to specify the target org. {message}",
)
# "required" checking is handled in the callback because it has more context
# about whether its already seen it.
arg_version = click.argument(
"orgname",
required=False,
callback=set_org_name(required),
expose_value=False,
)
return arg_version(opt_version(func))
return decorator
|
def orgname_option_or_argument(*, required):
"""Create decorator that allows org_name to be an option or an argument"""
def decorator(func):
if required:
message = "One of ORGNAME (see above) or --org is required."
else:
message = "By default, runs against the current default org."
opt_version = click.option(
"--org",
callback=set_org_name(
False
), # never required because arg-version may be specified
expose_value=False,
help=f"Alternate way to specify the target org. {message}",
)
# "required" checking is handled in the callback because it has more context
# about whether its already seen it.
arg_version = click.argument(
"orgname",
required=False,
callback=set_org_name(required),
expose_value=False,
)
return arg_version(opt_version(func))
return decorator
|
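A sketch of the decorator above applied to a Click command, assuming it is used in the same module so that `set_org_name` resolves; the command name and body are placeholders.
import click
@click.command(name="org_info")
@orgname_option_or_argument(required=False)
def org_info():
    """Hypothetical command accepting ORGNAME as an argument or via --org."""
    click.echo("running against the resolved org")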
11 |
def do_migrate(args):
if "bowler" not in sys.modules:
print("bowler can't be found, not mirating config file")
print("install it and try again")
sys.exit(1)
shutil.copyfile(args.config, args.config+BACKUP_SUFFIX)
for m in MIGRATIONS:
m(args.config).execute(interactive=args.interactive, write=True)
|
def do_migrate(args):
if "bowler" not in sys.modules:
print("bowler can't be found, not migrating config file")
print("install it and try again")
sys.exit(1)
shutil.copyfile(args.config, args.config+BACKUP_SUFFIX)
for m in MIGRATIONS:
m(args.config).execute(interactive=args.interactive, write=True)
|
12,725 |
def docker_build_failed(address: Address, context: DockerBuildContext, colors: bool) -> str | None:
if not context.copy_source_vs_context_source:
return None
msg = (
f"Docker build failed for `docker_image` {address}. The {context.dockerfile} have `COPY`"
"instructions where the source files may not have been found in the Docker build context."
"\n\n"
)
renames = [
format_rename_suggestion(src, dst, colors=colors)
for src, dst in context.copy_source_vs_context_source
if src and dst
]
if renames:
msg += (
f"However there are possible matches. Please review the following list of suggested "
f"renames:\n\n{bullet_list(renames)}\n\n"
)
unknown = [src for src, dst in context.copy_source_vs_context_source if not dst]
if unknown:
msg += (
f"The following files where not found in the Docker build context:\n\n"
f"{bullet_list(unknown)}\n\n"
)
unreferenced = [dst for src, dst in context.copy_source_vs_context_source if not src]
if unreferenced:
if len(unreferenced) > 10:
unreferenced = unreferenced[:9] + [f"... and {len(unreferenced)-9} more"]
msg += (
f"There are additional files in the Docker build context that were not referenced by "
f"any `COPY` instruction (this is not an error):\n\n{bullet_list(unreferenced)}\n\n"
)
return msg
|
def docker_build_failed(address: Address, context: DockerBuildContext, colors: bool) -> str | None:
if not context.copy_source_vs_context_source:
return None
msg = (
f"Docker build failed for `docker_image` {address}. The {context.dockerfile} have `COPY`"
"instructions where the source files may not have been found in the Docker build context."
"\n\n"
)
renames = [
format_rename_suggestion(src, dst, colors=colors)
for src, dst in context.copy_source_vs_context_source
if src and dst
]
if renames:
msg += (
f"However there are possible matches. Please review the following list of suggested "
f"renames:\n\n{bullet_list(renames)}\n\n"
)
unknown = [src for src, dst in context.copy_source_vs_context_source if not dst]
if unknown:
msg += (
f"The following files were not found in the Docker build context:\n\n"
f"{bullet_list(unknown)}\n\n"
)
unreferenced = [dst for src, dst in context.copy_source_vs_context_source if not src]
if unreferenced:
if len(unreferenced) > 10:
unreferenced = unreferenced[:9] + [f"... and {len(unreferenced)-9} more"]
msg += (
f"There are additional files in the Docker build context that were not referenced by "
f"any `COPY` instruction (this is not an error):\n\n{bullet_list(unreferenced)}\n\n"
)
return msg
|
31,848 |
def get_report(client: Client, report_id):
"""
Returns the report from ThreatStream sandbox by id.
"""
response = client.http_request("GET", F"v1/submit/{report_id}/report", resp_type='response')
if response.status_code == 404:
return f'No report found with id {report_id}'
report = response.json()
report_results = report.get('results', {})
if report_results:
info = parse_info(report_results.get('info', {}))
info['ReportID'] = report_id
_, info['Verdict'] = get_submission_status(client, report_id, False)
readable_output = tableToMarkdown(f'Report {report_id} analysis results', info)
# ignore 'networks' from the readable output
info['Network'] = parse_network_lists(report_results.get('network', {}))
return CommandResults(
outputs_prefix=f'{THREAT_STREAM}.Analysis',
outputs_key_field='ReportID',
outputs=info,
readable_output=readable_output,
raw_response=report
)
|
def get_report(client: Client, report_id):
"""
Returns the report from ThreatStream sandbox by id.
"""
response = client.http_request('GET', f'v1/submit/{report_id}/report', resp_type='response')
if response.status_code == 404:
return f'No report found with id {report_id}'
report = response.json()
report_results = report.get('results', {})
if report_results:
info = parse_info(report_results.get('info', {}))
info['ReportID'] = report_id
_, info['Verdict'] = get_submission_status(client, report_id, False)
readable_output = tableToMarkdown(f'Report {report_id} analysis results', info)
# ignore 'networks' from the readable output
info['Network'] = parse_network_lists(report_results.get('network', {}))
return CommandResults(
outputs_prefix=f'{THREAT_STREAM}.Analysis',
outputs_key_field='ReportID',
outputs=info,
readable_output=readable_output,
raw_response=report
)
|
50,568 |
def test_df_apply_returning_series(df):
# https://github.com/geopandas/geopandas/issues/2283
df.apply(lambda row: row.geometry, axis=1)
|
def test_df_apply_returning_series(df):
# https://github.com/geopandas/geopandas/issues/2283
result = df.apply(lambda row: row.geometry, axis=1)
assert_geoseries_equal(result, df.geometry, check_crs=False)
|
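A minimal sketch (assuming geopandas and shapely are installed) of the behaviour the added assertion exercises: applying a row-wise function that returns geometries is expected to yield a GeoSeries in geopandas versions containing this fix. The data here is a hypothetical two-row frame.

import geopandas as gpd
from shapely.geometry import Point

# Hypothetical two-row GeoDataFrame with point geometries.
df = gpd.GeoDataFrame({"value": [1, 2]}, geometry=[Point(0, 0), Point(1, 1)])
result = df.apply(lambda row: row.geometry, axis=1)
print(type(result))  # expected: geopandas.geoseries.GeoSeries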
54,462 |
def get_all_study_summaries(storage: Union[str, storages.BaseStorage]) -> List[StudySummary]:
"""Get all history of studies stored in a specified storage.
Example:
.. testsetup::
import os
if os.path.exists("example.db"):
raise RuntimeError("'example.db' already exists. Please remove it.")
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
return (x - 2) ** 2
study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db")
study.optimize(objective, n_trials=3)
study_sammary = optuna.study.get_all_study_summaries(storage="sqlite:///example.db")[0]
.. testcleanup::
os.remove("example.db")
Args:
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for further details.
Returns:
List of study history summarized as :class:`~optuna.study.StudySummary` objects.
See also:
:func:`optuna.get_all_study_summaries` is an alias of
:func:`optuna.study.get_all_study_summaries`.
"""
storage = storages.get_storage(storage)
return storage.get_all_study_summaries()
|
def get_all_study_summaries(storage: Union[str, storages.BaseStorage]) -> List[StudySummary]:
"""Get all history of studies stored in a specified storage.
Example:
.. testsetup::
import os
if os.path.exists("example.db"):
raise RuntimeError("'example.db' already exists. Please remove it.")
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
return (x - 2) ** 2
study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db")
study.optimize(objective, n_trials=3)
study_summaries = optuna.study.get_all_study_summaries(storage="sqlite:///example.db")
assert len(study_summaries) == 1
study_summary = study_summaries[0]
assert study_summary.study_name == "example-study"
.. testcleanup::
os.remove("example.db")
Args:
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for further details.
Returns:
List of study history summarized as :class:`~optuna.study.StudySummary` objects.
See also:
:func:`optuna.get_all_study_summaries` is an alias of
:func:`optuna.study.get_all_study_summaries`.
"""
storage = storages.get_storage(storage)
return storage.get_all_study_summaries()
|
54,512 |
def test_plot_parallel_coordinate() -> None:
# Test with no trial.
study = create_study()
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 0 + 1
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with a trial to select parameter.
figure = plot_parallel_coordinate(study, params=["param_a"])
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_parallel_coordinate(
study, params=["param_a"], target=lambda t: t.params["param_b"]
)
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target name.
figure = plot_parallel_coordinate(study, target_name="Target Name")
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Target Name"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with wrong params that do not exist in trials
with pytest.raises(ValueError, match="Parameter optuna does not exist in your study."):
plot_parallel_coordinate(study, params=["optuna", "optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_parallel_coordinate(study)
assert len(figure.get_lines()) == 0
|
def test_plot_parallel_coordinate() -> None:
# Test with no trial.
study = create_study()
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 0 + 1
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 3 + 1
axes = figure.get_figure().axes
assert len(axes) == 3 + 1
assert axes[1].get_ylabel() == "Objective Value"
assert axes[1].get_ylim() == (0.0, 2.0)
assert axes[2].get_ylim() == (1.0, 2.5)
assert [
axes[2].get_lines()[0].get_ydata()[0],
axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert axes[3].get_ylim() == (0.0, 2.0)
assert axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
line_collections = figure.findobj(LineCollection)
assert len(line_collections) == 1
assert line_collections[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
xticklabels = axes[0].get_xticklabels()
assert xticklabels[0].get_text() == "Objective Value"
assert xticklabels[1].get_text() == "param_a"
assert xticklabels[2].get_text() == "param_b"
# Test with a trial to select parameter.
figure = plot_parallel_coordinate(study, params=["param_a"])
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_parallel_coordinate(
study, params=["param_a"], target=lambda t: t.params["param_b"]
)
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target name.
figure = plot_parallel_coordinate(study, target_name="Target Name")
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Target Name"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with wrong params that do not exist in trials
with pytest.raises(ValueError, match="Parameter optuna does not exist in your study."):
plot_parallel_coordinate(study, params=["optuna", "optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_parallel_coordinate(study)
assert len(figure.get_lines()) == 0
|
50,245 |
def get_user_from_access_payload(payload: dict) -> Optional[User]:
jwt_type = payload.get("type")
if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
permissions = payload.get(PERMISSIONS_FIELD, None)
user = get_user_from_payload(payload)
if user:
if permissions is not None:
token_permissions = get_permissions_from_names(permissions)
token_codenames = [perm.codename for perm in token_permissions]
user.effective_permissions = get_permissions_from_codenames(token_codenames)
user.is_staff = True if user.effective_permissions else False
is_staff = payload.get("is_staff")
if is_staff:
user.is_staff = True
return user
|
def get_user_from_access_payload(payload: dict) -> Optional[User]:
jwt_type = payload.get("type")
if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
permissions = payload.get(PERMISSIONS_FIELD, None)
user = get_user_from_payload(payload)
if user:
if permissions is not None:
token_permissions = get_permissions_from_names(permissions)
token_codenames = [perm.codename for perm in token_permissions]
user.effective_permissions = get_permissions_from_codenames(token_codenames)
user.is_staff = True if user.effective_permissions else False
if is_staff := payload.get("is_staff"):
user.is_staff = True
return user
|
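The change above is purely stylistic: the walrus operator folds the assignment into the condition. A self-contained sketch of the equivalence (hypothetical payload dict; requires Python 3.8+):

payload = {"is_staff": True}

# Before: separate assignment and truthiness check.
is_staff = payload.get("is_staff")
if is_staff:
    print("staff user")

# After: the assignment expression combines the two steps.
if is_staff := payload.get("is_staff"):
    print("staff user")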
27,442 |
def download_IERS_A(show_progress=True):
"""
Download and cache the IERS Bulletin A table.
If one is already cached, download a new one and overwrite the old. Store
table in the astropy cache, and undo the monkey patching done by
`~astroplan.get_IERS_A_or_workaround`.
Parameters
----------
show_progress : bool
`True` shows a progress bar during the download.
"""
urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if IERS_A_in_cache():
for url in urls:
clear_download_cache(url)
for i, url in enumerate(urls):
try:
local_iers_a_path = download_file(iers.IERS_A_URL, cache=True,
show_progress=show_progress)
except urllib.error.URLError:
if i == len(urls) - 1:
raise
# Undo monkey patch set up by get_IERS_A_or_workaround
iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
|
def download_IERS_A(show_progress=True):
"""
Download and cache the IERS Bulletin A table.
If one is already cached, download a new one and overwrite the old. Store
table in the astropy cache, and undo the monkey patching done by
`~astroplan.get_IERS_A_or_workaround`.
Parameters
----------
show_progress : bool
`True` shows a progress bar during the download.
"""
urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if IERS_A_in_cache():
for url in urls:
clear_download_cache(url)
for i, url in enumerate(urls):
try:
local_iers_a_path = download_file(url, cache=True,
show_progress=show_progress)
except urllib.error.URLError:
if i == len(urls) - 1:
raise
# Undo monkey patch set up by get_IERS_A_or_workaround
iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
|
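A generic sketch of the try-each-mirror pattern used above, with placeholder URLs rather than the real IERS endpoints: each URL is attempted in order, and the error is re-raised only if the last one also fails.

import urllib.error
import urllib.request

urls = ("https://example.com/primary", "https://example.com/mirror")  # placeholders

for i, url in enumerate(urls):
    try:
        data = urllib.request.urlopen(url, timeout=10).read()
        break  # first URL that works wins
    except urllib.error.URLError:
        if i == len(urls) - 1:
            raise  # every mirror failed, so propagate the error

print(f"downloaded {len(data)} bytes from {url}")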
6,455 |
def prepare_chart_data(item_data):
labels, qty_to_order, ordered_qty, received_qty, pending_qty = [], [], [], [], []
if len(item_data) > 30:
item_data = dict(list(item_data.items())[:30])
for row in item_data:
mr_row = item_data[row]
labels.append(row)
qty_to_order.append(mr_row["qty_to_order"])
ordered_qty.append(mr_row["ordered_qty"])
received_qty.append(mr_row["received_qty"])
pending_qty.append(mr_row["pending_qty"])
chart_data = {
"data" : {
"labels": labels,
"datasets": [
{
'name': _('Qty to Order'),
'values': qty_to_order
},
{
'name': _('Ordered Qty'),
'values': ordered_qty
},
{
'name': _('Received Qty'),
'values': received_qty
},
{
'name': _('Pending Qty'),
'values': pending_qty
}
]
},
"type": "bar",
"barOptions": {
"stacked": 1
},
}
return chart_data
|
def prepare_chart_data(item_data):
labels, qty_to_order, ordered_qty, received_qty, qty_to_receive = [], [], [], [], []
if len(item_data) > 30:
item_data = dict(list(item_data.items())[:30])
for row in item_data:
mr_row = item_data[row]
labels.append(row)
qty_to_order.append(mr_row["qty_to_order"])
ordered_qty.append(mr_row["ordered_qty"])
received_qty.append(mr_row["received_qty"])
		qty_to_receive.append(mr_row["pending_qty"])
chart_data = {
"data" : {
"labels": labels,
"datasets": [
{
'name': _('Qty to Order'),
'values': qty_to_order
},
{
'name': _('Ordered Qty'),
'values': ordered_qty
},
{
'name': _('Received Qty'),
'values': received_qty
},
{
					'name': _('Qty to Receive'),
					'values': qty_to_receive
}
]
},
"type": "bar",
"barOptions": {
"stacked": 1
},
}
return chart_data
|
58,679 |
def convert(args: argparse.Namespace):
output = Path(args.output[0])
if not os.path.exists(output):
print_error_and_exit(
f"The output path {output} doesn't exist. Please make sure to specify "
f"existing directory and try again."
)
return
for training_data_path in args.training_data:
if not os.path.exists(training_data_path):
print_error_and_exit(
f"The training data path {training_data_path} doesn't exist "
f"and will be skipped."
)
loop = asyncio.get_event_loop()
num_of_files_converted = 0
for file in os.listdir(training_data_path):
source_path = Path(training_data_path) / file
output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}"
if MarkdownReader.is_markdown_nlu_file(source_path):
convert_nlu(source_path, output_path, source_path)
num_of_files_converted += 1
elif MarkdownStoryReader.is_markdown_story_file(source_path):
loop.run_until_complete(
convert_core(source_path, output_path, source_path)
)
num_of_files_converted += 1
else:
print_warning(
f"Skipped file '{source_path}' since it's neither NLU "
"nor Core training data file."
)
print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
|
def convert(args: argparse.Namespace):
output = Path(args.output[0])
if not os.path.exists(output):
print_error_and_exit(
f"The output path {output} doesn't exist. Please make sure to specify "
f"an existing directory and try again."
)
return
for training_data_path in args.training_data:
if not os.path.exists(training_data_path):
print_error_and_exit(
f"The training data path {training_data_path} doesn't exist "
f"and will be skipped."
)
loop = asyncio.get_event_loop()
num_of_files_converted = 0
for file in os.listdir(training_data_path):
source_path = Path(training_data_path) / file
output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}"
if MarkdownReader.is_markdown_nlu_file(source_path):
convert_nlu(source_path, output_path, source_path)
num_of_files_converted += 1
elif MarkdownStoryReader.is_markdown_story_file(source_path):
loop.run_until_complete(
convert_core(source_path, output_path, source_path)
)
num_of_files_converted += 1
else:
print_warning(
f"Skipped file '{source_path}' since it's neither NLU "
"nor Core training data file."
)
print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
|
32,826 |
def pytest_configure(config):
if not os.getenv("CI") == "true":
return
    # Write JUnit xml results to a file that contains this process's PID
# This ensures running pytest multiple times does not overwrite previous results
# e.g. test-results/junit.xml -> test-results/junit.1797.xml
if config.option.xmlpath:
fname, ext = os.path.splitext(config.option.xmlpath)
# DEV: `ext` will contain the `.`, e.g. `.xml`
config.option.xmlpath = "{0}.{1}{2}".format(fname, os.getpid(), ext)
|
def pytest_configure(config):
if os.getenv("CI") != "true":
return
    # Write JUnit xml results to a file that contains this process's PID
# This ensures running pytest multiple times does not overwrite previous results
# e.g. test-results/junit.xml -> test-results/junit.1797.xml
if config.option.xmlpath:
fname, ext = os.path.splitext(config.option.xmlpath)
# DEV: `ext` will contain the `.`, e.g. `.xml`
config.option.xmlpath = "{0}.{1}{2}".format(fname, os.getpid(), ext)
|
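A small self-contained sketch of the PID-suffix renaming done above, which keeps parallel pytest runs from overwriting each other's JUnit reports (the path is hypothetical):

import os

xmlpath = "test-results/junit.xml"
fname, ext = os.path.splitext(xmlpath)  # ext keeps the dot, e.g. ".xml"
xmlpath = "{0}.{1}{2}".format(fname, os.getpid(), ext)
print(xmlpath)  # e.g. test-results/junit.12345.xml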
31,449 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
trending_packs = get_trending_packs(bq_client, index_folder_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified,
landing_page_sections=landing_page_sections,
trending_packs=trending_packs)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
logging.info("Calling prepare release notes")
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # in case the pack already exists at cloud storage path and in index, don't show that the pack was changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=landing_page_sections)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
trending_packs = get_trending_packs(bq_client, index_folder_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
        task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified,
landing_page_sections=landing_page_sections,
trending_packs=trending_packs)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
logging.info("Calling prepare release notes")
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # in case the pack already exists at cloud storage path and in index, don't show that the pack was changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=landing_page_sections)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
30,693 |
def main():
options = options_handler()
username = options.user
password = options.password
ami_env = options.ami_env
git_sha1 = options.git_sha1
conf_path = options.conf
secret_conf_path = options.secret
branch_name = options.branch
ci_build_number = options.build_number
servers = determine_servers_urls(ami_env)
server_numeric_version = get_server_numeric_version(ami_env)
prints_manager = ParallelPrintsManager(1)
conf, secret_conf = load_conf_files(conf_path, secret_conf_path)
secret_params = secret_conf.get('integrations', []) if secret_conf else []
username = secret_conf.get('username') if not username else username
password = secret_conf.get('userPassword') if not password else password
if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'):
for server in servers:
client = demisto_client.configure(base_url=server, username=username, password=password,
verify_ssl=False)
set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number)
print('Restarting servers to apply GCS server config ...')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo systemctl restart demisto"'
try:
subprocess.check_output(
ssh_string.format('ec2-user', server.replace('https://', '')), shell=True)
except subprocess.CalledProcessError as exc:
print(exc.output)
print('Done restarting servers.')
tests = conf['tests']
skipped_integrations_conf = conf['skipped_integrations']
all_module_instances = []
filtered_tests, filter_configured, run_all_tests = extract_filtered_tests(is_nightly=options.is_nightly)
tests_for_iteration = tests
if run_all_tests:
# skip test button testing
skipped_instance_test_message = 'Not running instance tests when {} is turned on'.format(RUN_ALL_TESTS_FORMAT)
prints_manager.add_print_job(skipped_instance_test_message, print_warning, 0)
tests_for_iteration = []
elif filter_configured and filtered_tests:
tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests]
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version,
prints_manager)
prints_manager.execute_thread_prints(0)
# get a list of brand new integrations that way we filter them out to only configure instances
# after updating content
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(git_sha1)
new_integrations_names, modified_integrations_names = [], []
installed_content_packs_successfully = True
if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'):
# sleep for one minute before starting to search and install packs to ensure bucket is ready
prints_manager.add_print_job('Sleeping for 1 minute...', print_warning, 0)
prints_manager.execute_thread_prints(0)
sleep(60)
if options.nightly:
threads_list = []
threads_prints_manager = ParallelPrintsManager(len(servers))
# For each server url we install content
for thread_index, server_url in enumerate(servers):
client = demisto_client.configure(base_url=server_url, username=username,
password=password, verify_ssl=False)
t = Thread(target=install_all_content_packs,
kwargs={'client': client, 'server': server_url,
'prints_manager': threads_prints_manager,
'thread_index': thread_index})
threads_list.append(t)
run_threads_list(threads_list)
else:
# install content packs in every server
pack_ids = get_pack_ids_to_install()
for server_url in servers:
try:
client = demisto_client.configure(base_url=server_url, username=username, password=password,
verify_ssl=False)
search_and_install_packs_and_their_dependencies(pack_ids, client, prints_manager, options.is_nightly)
except Exception as exc:
prints_manager.add_print_job(str(exc), print_error, 0)
prints_manager.execute_thread_prints(0)
installed_content_packs_successfully = False
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
new_integrations_names_message = \
'New Integrations Since Last Release:\n{}\n'.format('\n'.join(new_integrations_names))
prints_manager.add_print_job(new_integrations_names_message, print_warning, 0)
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
modified_integrations_names_message = \
'Updated Integrations Since Last Release:\n{}\n'.format('\n'.join(modified_integrations_names))
prints_manager.add_print_job(modified_integrations_names_message, print_warning, 0)
prints_manager.execute_thread_prints(0)
# Each test is a dictionary from Tests/conf.json which may contain the following fields
# "playbookID", "integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion"
# Note that only the "playbookID" field is required with all of the others being optional.
# Most tests have an "integrations" field listing the integration used for that playbook
# and sometimes an "instance_names" field which is used when there are multiple instances
# of an integration that we want to configure with different configuration values. Look at
# [conf.json](../conf.json) for examples
brand_new_integrations = []
for test in tests_for_iteration:
testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password,
verify_ssl=False)
integrations = get_integrations_for_test(test, skipped_integrations_conf)
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
integrations_names = [i.get('name') for i in integrations]
prints_manager.add_print_job('All Integrations for test "{}":'.format(test.get('playbookID')), print_warning, 0)
prints_manager.add_print_job(integrations_names, print_warning, 0)
new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations(
integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names
)
integrations_msg = '\n'.join(['"{}" - {}'.format(key, val) for key, val in integration_to_status.items()])
prints_manager.add_print_job('{}\n'.format(integrations_msg), print_warning, 0)
integrations_to_configure = modified_integrations[:]
integrations_to_configure.extend(unchanged_integrations)
# set params for new integrations and [modified + unchanged] integrations, then add the new ones
# to brand_new_integrations list for later use
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
new_ints_params_set = set_integration_params(new_integrations, secret_params, instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(integrations_to_configure, secret_params,
instance_names_conf, placeholders_map)
if not new_ints_params_set:
prints_manager.add_print_job(
'failed setting parameters for integrations "{}"'.format('\n'.join(new_integrations)), print_error, 0)
if not ints_to_configure_params_set:
prints_manager.add_print_job(
'failed setting parameters for integrations "{}"'.format('\n'.join(integrations_to_configure)),
print_error, 0)
if not (new_ints_params_set and ints_to_configure_params_set):
continue
prints_manager.execute_thread_prints(0)
brand_new_integrations.extend(new_integrations)
module_instances = []
for integration in integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
module_instance = configure_integration_instance(integration, testing_client, prints_manager,
placeholders_map)
if module_instance:
module_instances.append(module_instance)
all_module_instances.extend(module_instances)
preupdate_fails = set()
postupdate_fails = set()
preupdate_success = set()
postupdate_success = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
prints_manager.add_print_job('Start of Instance Testing ("Test" button) prior to Content Update:',
print_warning, 0)
else:
prints_manager.add_print_job('No integrations to configure for the chosen tests. (Pre-update)',
print_warning, 0)
prints_manager.execute_thread_prints(0)
for instance in all_module_instances:
testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password,
verify_ssl=False)
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
msg = 'Testing ("Test" button) for instance "{}" of integration "{}".'.format(instance_name,
integration_of_instance)
prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(0)
# If there is a failure, __test_integration_instance will print it
success, _ = __test_integration_instance(testing_client, instance, prints_manager)
prints_manager.execute_thread_prints(0)
if not success:
preupdate_fails.add((instance_name, integration_of_instance))
else:
preupdate_success.add((instance_name, integration_of_instance))
if LooseVersion(server_numeric_version) < LooseVersion('6.0.0'):
threads_list = []
threads_prints_manager = ParallelPrintsManager(len(servers))
# For each server url we install content
for thread_index, server_url in enumerate(servers):
client = demisto_client.configure(base_url=server_url, username=username,
password=password, verify_ssl=False)
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': client, 'server': server_url, 'ami_name': ami_env,
'prints_manager': threads_prints_manager,
'thread_index': thread_index})
threads_list.append(t)
run_threads_list(threads_list)
# configure instances for new integrations
new_integration_module_instances = []
for integration in brand_new_integrations:
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
new_integration_module_instance = configure_integration_instance(integration, testing_client, prints_manager,
placeholders_map)
if new_integration_module_instance:
new_integration_module_instances.append(new_integration_module_instance)
all_module_instances.extend(new_integration_module_instances)
# After content upload has completed - test ("Test" button) integration instances
# Test all module instances (of pre-existing AND new integrations) post-updating content
if all_module_instances:
# only print start message if there are instances to configure
prints_manager.add_print_job('Start of Instance Testing ("Test" button) after the Content Update:',
print_warning, 0)
else:
prints_manager.add_print_job('No integrations to configure for the chosen tests. (Post-update)',
print_warning, 0)
prints_manager.execute_thread_prints(0)
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
msg = 'Testing ("Test" button) for instance "{}" of integration "{}" .'.format(instance_name,
integration_of_instance)
prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(0)
# If there is a failure, __test_integration_instance will print it
success, _ = __test_integration_instance(testing_client, instance, prints_manager)
prints_manager.execute_thread_prints(0)
if not success:
postupdate_fails.add((instance_name, integration_of_instance))
else:
postupdate_success.add((instance_name, integration_of_instance))
# reinitialize all clients since their authorization has probably expired by now
for server_url in servers:
client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False)
__disable_integrations_instances(client, all_module_instances, prints_manager)
prints_manager.execute_thread_prints(0)
success = report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, prints_manager)
prints_manager.execute_thread_prints(0)
if not success or not installed_content_packs_successfully:
sys.exit(2)
|
def main():
options = options_handler()
username = options.user
password = options.password
ami_env = options.ami_env
git_sha1 = options.git_sha1
conf_path = options.conf
secret_conf_path = options.secret
branch_name = options.branch
ci_build_number = options.build_number
servers = determine_servers_urls(ami_env)
server_numeric_version = get_server_numeric_version(ami_env)
prints_manager = ParallelPrintsManager(1)
conf, secret_conf = load_conf_files(conf_path, secret_conf_path)
secret_params = secret_conf.get('integrations', []) if secret_conf else []
username = secret_conf.get('username') if not username else username
password = secret_conf.get('userPassword') if not password else password
if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'):
for server in servers:
client = demisto_client.configure(base_url=server, username=username, password=password,
verify_ssl=False)
set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number)
print('Restarting servers to apply GCS server config ...')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo systemctl restart demisto"'
try:
subprocess.check_output(
ssh_string.format('ec2-user', server.replace('https://', '')), shell=True)
except subprocess.CalledProcessError as exc:
print(exc.output)
print('Done restarting servers.')
tests = conf['tests']
skipped_integrations_conf = conf['skipped_integrations']
all_module_instances = []
filtered_tests, filter_configured, run_all_tests = extract_filtered_tests(is_nightly=options.is_nightly)
tests_for_iteration = tests
if run_all_tests:
# skip test button testing
skipped_instance_test_message = 'Not running instance tests when {} is turned on'.format(RUN_ALL_TESTS_FORMAT)
prints_manager.add_print_job(skipped_instance_test_message, print_warning, 0)
tests_for_iteration = []
elif filter_configured and filtered_tests:
tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests]
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version,
prints_manager)
prints_manager.execute_thread_prints(0)
# get a list of brand new integrations that way we filter them out to only configure instances
# after updating content
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(git_sha1)
new_integrations_names, modified_integrations_names = [], []
installed_content_packs_successfully = True
if LooseVersion(server_numeric_version) >= LooseVersion('6.0.0'):
# sleep for one minute before starting to search and install packs to ensure bucket is ready
prints_manager.add_print_job('Sleeping for 1 minute...', print_warning, 0)
prints_manager.execute_thread_prints(0)
sleep(60)
if options.nightly:
threads_list = []
threads_print_manager = ParallelPrintsManager(len(servers))
# For each server url we install content
for thread_index, server_url in enumerate(servers):
client = demisto_client.configure(base_url=server_url, username=username,
password=password, verify_ssl=False)
t = Thread(target=install_all_content_packs,
kwargs={'client': client, 'server': server_url,
                                   'prints_manager': threads_print_manager,
'thread_index': thread_index})
threads_list.append(t)
run_threads_list(threads_list)
else:
# install content packs in every server
pack_ids = get_pack_ids_to_install()
for server_url in servers:
try:
client = demisto_client.configure(base_url=server_url, username=username, password=password,
verify_ssl=False)
search_and_install_packs_and_their_dependencies(pack_ids, client, prints_manager, options.is_nightly)
except Exception as exc:
prints_manager.add_print_job(str(exc), print_error, 0)
prints_manager.execute_thread_prints(0)
installed_content_packs_successfully = False
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
new_integrations_names_message = \
'New Integrations Since Last Release:\n{}\n'.format('\n'.join(new_integrations_names))
prints_manager.add_print_job(new_integrations_names_message, print_warning, 0)
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
modified_integrations_names_message = \
'Updated Integrations Since Last Release:\n{}\n'.format('\n'.join(modified_integrations_names))
prints_manager.add_print_job(modified_integrations_names_message, print_warning, 0)
prints_manager.execute_thread_prints(0)
# Each test is a dictionary from Tests/conf.json which may contain the following fields
# "playbookID", "integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion"
# Note that only the "playbookID" field is required with all of the others being optional.
# Most tests have an "integrations" field listing the integration used for that playbook
# and sometimes an "instance_names" field which is used when there are multiple instances
# of an integration that we want to configure with different configuration values. Look at
# [conf.json](../conf.json) for examples
brand_new_integrations = []
for test in tests_for_iteration:
testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password,
verify_ssl=False)
integrations = get_integrations_for_test(test, skipped_integrations_conf)
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
integrations_names = [i.get('name') for i in integrations]
prints_manager.add_print_job('All Integrations for test "{}":'.format(test.get('playbookID')), print_warning, 0)
prints_manager.add_print_job(integrations_names, print_warning, 0)
new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations(
integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names
)
integrations_msg = '\n'.join(['"{}" - {}'.format(key, val) for key, val in integration_to_status.items()])
prints_manager.add_print_job('{}\n'.format(integrations_msg), print_warning, 0)
integrations_to_configure = modified_integrations[:]
integrations_to_configure.extend(unchanged_integrations)
# set params for new integrations and [modified + unchanged] integrations, then add the new ones
# to brand_new_integrations list for later use
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
new_ints_params_set = set_integration_params(new_integrations, secret_params, instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(integrations_to_configure, secret_params,
instance_names_conf, placeholders_map)
if not new_ints_params_set:
prints_manager.add_print_job(
'failed setting parameters for integrations "{}"'.format('\n'.join(new_integrations)), print_error, 0)
if not ints_to_configure_params_set:
prints_manager.add_print_job(
'failed setting parameters for integrations "{}"'.format('\n'.join(integrations_to_configure)),
print_error, 0)
if not (new_ints_params_set and ints_to_configure_params_set):
continue
prints_manager.execute_thread_prints(0)
brand_new_integrations.extend(new_integrations)
module_instances = []
for integration in integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
module_instance = configure_integration_instance(integration, testing_client, prints_manager,
placeholders_map)
if module_instance:
module_instances.append(module_instance)
all_module_instances.extend(module_instances)
preupdate_fails = set()
postupdate_fails = set()
preupdate_success = set()
postupdate_success = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
prints_manager.add_print_job('Start of Instance Testing ("Test" button) prior to Content Update:',
print_warning, 0)
else:
prints_manager.add_print_job('No integrations to configure for the chosen tests. (Pre-update)',
print_warning, 0)
prints_manager.execute_thread_prints(0)
for instance in all_module_instances:
testing_client = demisto_client.configure(base_url=servers[0], username=username, password=password,
verify_ssl=False)
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
msg = 'Testing ("Test" button) for instance "{}" of integration "{}".'.format(instance_name,
integration_of_instance)
prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(0)
# If there is a failure, __test_integration_instance will print it
success, _ = __test_integration_instance(testing_client, instance, prints_manager)
prints_manager.execute_thread_prints(0)
if not success:
preupdate_fails.add((instance_name, integration_of_instance))
else:
preupdate_success.add((instance_name, integration_of_instance))
if LooseVersion(server_numeric_version) < LooseVersion('6.0.0'):
threads_list = []
threads_prints_manager = ParallelPrintsManager(len(servers))
# For each server url we install content
for thread_index, server_url in enumerate(servers):
client = demisto_client.configure(base_url=server_url, username=username,
password=password, verify_ssl=False)
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': client, 'server': server_url, 'ami_name': ami_env,
'prints_manager': threads_prints_manager,
'thread_index': thread_index})
threads_list.append(t)
run_threads_list(threads_list)
# configure instances for new integrations
new_integration_module_instances = []
for integration in brand_new_integrations:
placeholders_map = {'%%SERVER_HOST%%': servers[0]}
new_integration_module_instance = configure_integration_instance(integration, testing_client, prints_manager,
placeholders_map)
if new_integration_module_instance:
new_integration_module_instances.append(new_integration_module_instance)
all_module_instances.extend(new_integration_module_instances)
# After content upload has completed - test ("Test" button) integration instances
# Test all module instances (of pre-existing AND new integrations) post-updating content
if all_module_instances:
# only print start message if there are instances to configure
prints_manager.add_print_job('Start of Instance Testing ("Test" button) after the Content Update:',
print_warning, 0)
else:
prints_manager.add_print_job('No integrations to configure for the chosen tests. (Post-update)',
print_warning, 0)
prints_manager.execute_thread_prints(0)
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
msg = 'Testing ("Test" button) for instance "{}" of integration "{}" .'.format(instance_name,
integration_of_instance)
prints_manager.add_print_job(msg, print_color, 0, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(0)
# If there is a failure, __test_integration_instance will print it
success, _ = __test_integration_instance(testing_client, instance, prints_manager)
prints_manager.execute_thread_prints(0)
if not success:
postupdate_fails.add((instance_name, integration_of_instance))
else:
postupdate_success.add((instance_name, integration_of_instance))
# reinitialize all clients since their authorization has probably expired by now
for server_url in servers:
client = demisto_client.configure(base_url=server_url, username=username, password=password, verify_ssl=False)
__disable_integrations_instances(client, all_module_instances, prints_manager)
prints_manager.execute_thread_prints(0)
success = report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, prints_manager)
prints_manager.execute_thread_prints(0)
if not success or not installed_content_packs_successfully:
sys.exit(2)
|
1,634 |
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000 or n_samples)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
Determines random number generation for subsampling.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged
.. versionchanged:: 0.23
The default value of `copy` changed from False to True in 0.23.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000 or n_samples)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
Determines random number generation for subsampling and smoothing noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged
.. versionchanged:: 0.23
The default value of `copy` changed from False to True in 0.23.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
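A minimal sketch of the axis handling described in the docstring above, assuming scikit-learn and NumPy are installed (the array X here is made up): per the function body, axis=1 is equivalent to transforming the transpose.
import numpy as np
from sklearn.preprocessing import quantile_transform

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))

# axis=0 (default): each column is mapped through its own empirical CDF.
Xt_cols = quantile_transform(X, n_quantiles=10, random_state=0, copy=True)

# axis=1 applies the same transformer to the transpose and transposes back.
Xt_rows = quantile_transform(X, axis=1, n_quantiles=3, random_state=0, copy=True)
assert np.allclose(Xt_rows, quantile_transform(X.T, n_quantiles=3, random_state=0, copy=True).T)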
16,648 |
def setup_proximity_component(
hass: HomeAssistant, name: str, config: ConfigType
) -> bool:
"""Set up the individual proximity component."""
ignored_zones: list[str] = config[CONF_IGNORED_ZONES]
proximity_devices: list[str] = config[CONF_DEVICES]
tolerance: int = config[CONF_TOLERANCE]
proximity_zone = name
unit_of_measurement: str = config.get(
CONF_UNIT_OF_MEASUREMENT, hass.config.units.length_unit
)
zone_id = f"zone.{config.get(CONF_ZONE)}"
proximity = Proximity( # type:ignore[no-untyped-call]
hass,
proximity_zone,
DEFAULT_DIST_TO_ZONE,
DEFAULT_DIR_OF_TRAVEL,
DEFAULT_NEAREST,
ignored_zones,
proximity_devices,
tolerance,
zone_id,
unit_of_measurement,
)
proximity.entity_id = f"{DOMAIN}.{proximity_zone}"
proximity.schedule_update_ha_state()
track_state_change(hass, proximity_devices, proximity.check_proximity_state_change)
return True
|
def setup_proximity_component(
hass: HomeAssistant, name: str, config: ConfigType
) -> bool:
"""Set up the individual proximity component."""
ignored_zones: list[str] = config[CONF_IGNORED_ZONES]
proximity_devices: list[str] = config[CONF_DEVICES]
tolerance: int = config[CONF_TOLERANCE]
proximity_zone = name
unit_of_measurement: str = config.get(
CONF_UNIT_OF_MEASUREMENT, hass.config.units.length_unit
)
zone_id = f"zone.{config[CONF_ZONE]}"
proximity = Proximity( # type:ignore[no-untyped-call]
hass,
proximity_zone,
DEFAULT_DIST_TO_ZONE,
DEFAULT_DIR_OF_TRAVEL,
DEFAULT_NEAREST,
ignored_zones,
proximity_devices,
tolerance,
zone_id,
unit_of_measurement,
)
proximity.entity_id = f"{DOMAIN}.{proximity_zone}"
proximity.schedule_update_ha_state()
track_state_change(hass, proximity_devices, proximity.check_proximity_state_change)
return True
|
59,012 |
def preprocess(batch1, batch2):
batch1 = F.resize(batch1, size=[520, 960])
batch2 = F.resize(batch2, size=[520, 960])
return transforms(batch1, batch2)
|
def preprocess(img1_batch, img2_batch):
batch1 = F.resize(img1_batch, size=[520, 960])
batch2 = F.resize(img2_batch, size=[520, 960])
return transforms(img1_batch, img2_batch)
|
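For context on the pair above: torchvision's functional resize returns new tensors rather than resizing in place, so it is the resized batches that must be passed on. A small sketch, assuming torchvision is installed and using made-up random batches:
import torch
import torchvision.transforms.functional as F

img1_batch = torch.rand(2, 3, 240, 480)   # hypothetical (N, C, H, W) batches
img2_batch = torch.rand(2, 3, 240, 480)

resized1 = F.resize(img1_batch, size=[520, 960])
resized2 = F.resize(img2_batch, size=[520, 960])

assert img1_batch.shape[-2:] == (240, 480)   # inputs are untouched
assert resized1.shape[-2:] == (520, 960)     # the returned tensors carry the new size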
55,853 |
def test_subscript_get_int_key() -> None:
c = OmegaConf.create("1: b")
assert isinstance(c, DictConfig)
assert "b" == c[1]
|
def test_subscript_get_int_key() -> None:
c = OmegaConf.create({1: b})
assert isinstance(c, DictConfig)
assert "b" == c[1]
|
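A brief sketch of integer-key access in OmegaConf, assuming a reasonably recent omegaconf release (integer keys need 2.1 or later); the values are made up:
from omegaconf import DictConfig, OmegaConf

cfg = OmegaConf.create({1: "b"})   # the YAML string form "1: b" parses to the same mapping
assert isinstance(cfg, DictConfig)
assert cfg[1] == "b"
assert cfg.get(1) == "b"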
29,752 |
def get_recommendations_for_user(model, user_id, user_name, recordings_df, top_artists_candidate_set,
similar_artists_candidate_set):
""" Get recommended recordings which belong to top artists and artists similar to top
artists listened to by the user.
Args:
model: Best model after training.
user_id (int): user id of the user.
user_name (str): User name of the user.
recordings_df: Dataframe containing distinct recordings and corresponding
mbids and names.
top_artists_candidate_set: Dataframe containing recording ids that belong to top artists.
similar_artists_candidate_set: Dataframe containing recording ids that belong to similar artists.
Returns:
user_recommendations_top_artist: list of recommended recordings of top artist.
user_recommendations_top_artist: list of recommended recordings of similar artist.
"""
top_artists_recordings = top_artists_candidate_set.select('user_id', 'recording_id') \
.where(col('user_id') == user_id)
top_artists_recordings_rdd = top_artists_recordings.rdd.map(lambda r: (r['user_id'], r['recording_id']))
user_recommendations_top_artist = generate_recommendations(top_artists_recordings_rdd,
config.RECOMMENDATION_TOP_ARTIST_LIMIT,
recordings_df, model)
if len(user_recommendations_top_artist) == 0:
current_app.logger.info('Top artists recommendations not generated for "{}"'.format(user_name))
similar_artists_recordings = similar_artists_candidate_set.select('user_id', 'recording_id') \
.where(col('user_id') == user_id)
try:
similar_artists_recordings.take(1)[0]
similar_artists_recordings_rdd = similar_artists_recordings.rdd.map(lambda r: (r['user_id'], r['recording_id']))
user_recommendations_similar_artist = generate_recommendations(similar_artists_recordings_rdd,
config.RECOMMENDATION_SIMILAR_ARTIST_LIMIT,
recordings_df, model)
except IndexError:
user_recommendations_similar_artist = []
current_app.logger.info('Similar artist recordings not found for "{}"'.format(user_name))
current_app.logger.info('Similar artist recommendations not generated for "{}"'.format(user_name))
return user_recommendations_top_artist, user_recommendations_similar_artist
|
def get_recommendations_for_user(model, user_id, user_name, recordings_df, top_artists_candidate_set,
similar_artists_candidate_set):
""" Get recommended recordings which belong to top artists and artists similar to top
artists listened to by the user.
Args:
model: Best model after training.
user_id (int): user id of the user.
user_name (str): User name of the user.
recordings_df: Dataframe containing distinct recordings and corresponding
mbids and names.
top_artists_candidate_set: Dataframe containing recording ids that belong to top artists.
similar_artists_candidate_set: Dataframe containing recording ids that belong to similar artists.
Returns:
user_recommendations_top_artist: list of recommended recordings of top artist.
user_recommendations_similar_artist: list of recommended recordings of similar artist.
"""
top_artists_recordings = top_artists_candidate_set.select('user_id', 'recording_id') \
.where(col('user_id') == user_id)
top_artists_recordings_rdd = top_artists_recordings.rdd.map(lambda r: (r['user_id'], r['recording_id']))
user_recommendations_top_artist = generate_recommendations(top_artists_recordings_rdd,
config.RECOMMENDATION_TOP_ARTIST_LIMIT,
recordings_df, model)
if len(user_recommendations_top_artist) == 0:
current_app.logger.info('Top artists recommendations not generated for "{}"'.format(user_name))
similar_artists_recordings = similar_artists_candidate_set.select('user_id', 'recording_id') \
.where(col('user_id') == user_id)
try:
similar_artists_recordings.take(1)[0]
similar_artists_recordings_rdd = similar_artists_recordings.rdd.map(lambda r: (r['user_id'], r['recording_id']))
user_recommendations_similar_artist = generate_recommendations(similar_artists_recordings_rdd,
config.RECOMMENDATION_SIMILAR_ARTIST_LIMIT,
recordings_df, model)
except IndexError:
user_recommendations_similar_artist = []
current_app.logger.info('Similar artist recordings not found for "{}"'.format(user_name))
current_app.logger.info('Similar artist recommendations not generated for "{}"'.format(user_name))
return user_recommendations_top_artist, user_recommendations_similar_artist
|
6,433 |
def get_context(context):
context.no_cache = 1
context.bg = 'background-color: #fafbfc; border-radius:0'
context.align_greeting = 'start'
context.align_search_box = '0'
settings = frappe.get_doc("Support Settings", "Support Settings")
s = settings
context.greeting_text = s.greeting_text if s.greeting_text else "We're here to help"
if s.greeting_text_and_search_bar_alignment == 'Center':
context.align_greeting = 'center'
context.align_search_box = '25%'
if s.greeting_text_and_search_bar_alignment == 'Right':
context.align_greeting = 'end'
context.align_search_box = '50%'
if s.background == 'Color' and s.select_color:
context.bg = 'background-color: ' + s.select_color + '; border-radius:0'
if s.background == 'Image' and s.add_image:
context.bg = 'background-image: url(' + s.add_image + '); background-repeat: no-repeat; border-radius:0'
# Support content
favorite_article_count = 0
portal_setting = frappe.get_single("Portal Settings")
context.favorite_article_list=[]
context.help_article_list=[]
context.category_list = frappe.get_all("Help Category", fields="name")
all_articles = [i[0] for i in frappe.db.sql("""SELECT route from `tabHelp Article`""")]
favorite_articles = get_favorite_articles()
for article in favorite_articles:
favorite_article_dict = {}
if favorite_article_count < 3:
if article[0] in all_articles:
favorite_article = frappe.get_all("Help Article", fields=["title", "content", "route", "category"], filters={"route": article[0]})
content = frappe.utils.strip_html(favorite_article[0].content)
if len(content) > 115:
content = content[:112] + '...'
favorite_article_dict = {
'title': favorite_article[0].title,
'content': content,
'category': favorite_article[0].category,
'route': favorite_article[0].route,
}
context.favorite_article_list.append(favorite_article_dict)
favorite_article_count += 1
for category in context.category_list:
help_aricles_per_category = {}
help_articles = frappe.get_all("Help Article", fields="*", filters={"category": category.name}, order_by="modified desc", limit=5)
help_aricles_per_caetgory = {
'category': category,
'articles': help_articles,
}
context.help_article_list.append(help_aricles_per_caetgory)
# Get Started sections
if s.get_started_sections:
sections = json.loads(s.get_started_sections)
context.get_started_sections = sections
# Forum posts
if s.show_latest_forum_posts:
topics_data, post_params = get_forum_posts(s)
context.post_params = post_params
context.forum_url = s.forum_url
context.topics = topics_data[:3]
# Issues
if frappe.session.user != "Guest":
context.issues = frappe.get_all("Issue", fields=["name", "status", "subject", "modified"])[:3]
else:
context.issues = []
|
def get_context(context):
context.no_cache = 1
settings = frappe.get_doc("Support Settings")
context.align_greeting = 'start'
context.align_search_box = '0'
settings = frappe.get_doc("Support Settings", "Support Settings")
s = settings
context.greeting_text = s.greeting_text if s.greeting_text else "We're here to help"
if s.greeting_text_and_search_bar_alignment == 'Center':
context.align_greeting = 'center'
context.align_search_box = '25%'
if s.greeting_text_and_search_bar_alignment == 'Right':
context.align_greeting = 'end'
context.align_search_box = '50%'
if s.background == 'Color' and s.select_color:
context.bg = 'background-color: ' + s.select_color + '; border-radius:0'
if s.background == 'Image' and s.add_image:
context.bg = 'background-image: url(' + s.add_image + '); background-repeat: no-repeat; border-radius:0'
# Support content
favorite_article_count = 0
portal_setting = frappe.get_single("Portal Settings")
context.favorite_article_list=[]
context.help_article_list=[]
context.category_list = frappe.get_all("Help Category", fields="name")
all_articles = [i[0] for i in frappe.db.sql("""SELECT route from `tabHelp Article`""")]
favorite_articles = get_favorite_articles()
for article in favorite_articles:
favorite_article_dict = {}
if favorite_article_count < 3:
if article[0] in all_articles:
favorite_article = frappe.get_all("Help Article", fields=["title", "content", "route", "category"], filters={"route": article[0]})
content = frappe.utils.strip_html(favorite_article[0].content)
if len(content) > 115:
content = content[:112] + '...'
favorite_article_dict = {
'title': favorite_article[0].title,
'content': content,
'category': favorite_article[0].category,
'route': favorite_article[0].route,
}
context.favorite_article_list.append(favorite_article_dict)
favorite_article_count += 1
for category in context.category_list:
help_aricles_per_category = {}
help_articles = frappe.get_all("Help Article", fields="*", filters={"category": category.name}, order_by="modified desc", limit=5)
help_aricles_per_caetgory = {
'category': category,
'articles': help_articles,
}
context.help_article_list.append(help_aricles_per_caetgory)
# Get Started sections
if s.get_started_sections:
sections = json.loads(s.get_started_sections)
context.get_started_sections = sections
# Forum posts
if s.show_latest_forum_posts:
topics_data, post_params = get_forum_posts(s)
context.post_params = post_params
context.forum_url = s.forum_url
context.topics = topics_data[:3]
# Issues
if frappe.session.user != "Guest":
context.issues = frappe.get_all("Issue", fields=["name", "status", "subject", "modified"])[:3]
else:
context.issues = []
|
31,513 |
def build_attribute_context(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response returned from MIPS to the context output format.
"""
attribute_fields = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'value',
'Event',
'Object',
'Galaxy',
'Tag',
'decay_score'
]
if isinstance(response, str):
response = json.loads(json.dumps(response))
attributes = response.get('Attribute')
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in attribute_fields if key in attributes[i]}
# Build Galaxy
if attributes[i].get('Galaxy'):
attributes[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in attributes[i]['Galaxy']
]
# Build Tag
if attributes[i].get('Tag'):
attributes[i]['Tag'] = [
{'Name': tag.get('name')} for tag in attributes[i].get('Tag')
]
attributes = replace_keys(attributes)
return attributes
|
def build_attribute_context(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of attribute search returned from MIPS to the context output format.
"""
attribute_fields = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'value',
'Event',
'Object',
'Galaxy',
'Tag',
'decay_score'
]
if isinstance(response, str):
response = json.loads(json.dumps(response))
attributes = response.get('Attribute')
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in attribute_fields if key in attributes[i]}
# Build Galaxy
if attributes[i].get('Galaxy'):
attributes[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in attributes[i]['Galaxy']
]
# Build Tag
if attributes[i].get('Tag'):
attributes[i]['Tag'] = [
{'Name': tag.get('name')} for tag in attributes[i].get('Tag')
]
attributes = replace_keys(attributes)
return attributes
|
8,797 |
def test_bot_mixed_mode_types(mockbot):
"""Ensure mixed argument- and non-argument- modes are handled.
Sopel 6.6.6 and older did not behave well.
.. seealso::
Github Issue #1575.
"""
irc = IRCFactory(mockbot)
irc.channel_joined('#test', [
'Uvoice', 'Uop', 'Uadmin', 'Uvoice2', 'Uop2', 'Uadmin2'])
irc.mode_set('#test', '+amov', ['Uadmin', 'Uop', 'Uvoice'])
assert mockbot.channels["#test"].privileges[Identifier("Uadmin")] == ADMIN
assert mockbot.channels["#test"].privileges[Identifier("Uop")] == OP
assert mockbot.channels["#test"].privileges[Identifier("Uvoice")] == VOICE
irc.mode_set('#test', '+abov', ['Uadmin2', 'x!y@z', 'Uop2', 'Uvoice2'])
assert mockbot.channels["#test"].privileges[Identifier("Uadmin2")] == 0
assert mockbot.channels["#test"].privileges[Identifier("Uop2")] == 0
assert mockbot.channels["#test"].privileges[Identifier("Uvoice2")] == 0
assert mockbot.backend.message_sent == rawlist('WHO #test'), (
'Upon finding an unexpected nick, the bot must send a WHO request.')
|
def test_bot_mixed_mode_types(mockbot):
"""Ensure mixed argument-required and -not-required modes are handled.
Sopel 6.6.6 and older did not behave well.
.. seealso::
Github Issue #1575.
"""
irc = IRCFactory(mockbot)
irc.channel_joined('#test', [
'Uvoice', 'Uop', 'Uadmin', 'Uvoice2', 'Uop2', 'Uadmin2'])
irc.mode_set('#test', '+amov', ['Uadmin', 'Uop', 'Uvoice'])
assert mockbot.channels["#test"].privileges[Identifier("Uadmin")] == ADMIN
assert mockbot.channels["#test"].privileges[Identifier("Uop")] == OP
assert mockbot.channels["#test"].privileges[Identifier("Uvoice")] == VOICE
irc.mode_set('#test', '+abov', ['Uadmin2', 'x!y@z', 'Uop2', 'Uvoice2'])
assert mockbot.channels["#test"].privileges[Identifier("Uadmin2")] == 0
assert mockbot.channels["#test"].privileges[Identifier("Uop2")] == 0
assert mockbot.channels["#test"].privileges[Identifier("Uvoice2")] == 0
assert mockbot.backend.message_sent == rawlist('WHO #test'), (
'Upon finding an unexpected nick, the bot must send a WHO request.')
|
20,545 |
def get_parser():
# initialize default parameters
param_default = Param()
# Initialize the parser
parser = SCTArgumentParser(
description="Smooth the spinal cord along its centerline. Steps are:\n"
" 1) Spinal cord is straightened (using centerline),\n"
" 2) a Gaussian kernel is applied in the superior-inferior direction,\n"
" 3) then cord is de-straightened as originally.\n"
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Image to smooth. Example: data.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Spinal cord centerline or segmentation. Example: data_centerline.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-smooth',
metavar=Metavar.list,
type=list_type(',', float),
default=[0, 0, 3],
help="Sigma (standard deviation) of the smoothing Gaussian kernel (in mm). For isotropic smoothing you only "
"need to specify a value (e.g. 2). For anisotropic smoothing specify a value for each axis, separated "
"with a comma. The order should follow axes Right-Left, Antero-Posterior, Superior-Inferior "
"(e.g.: 1,1,3). For no smoothing, set value to 0."
)
optional.add_argument(
'-algo-fitting',
metavar=Metavar.str,
choices=['bspline', 'polyfit'],
default=param_default.algo_fitting,
help=f"Algorithm for curve fitting. For more information, see sct_straighten_spinalcord."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help='Output filename. Example: smooth_sc.nii.gz'),
optional.add_argument(
'-r',
choices=[0, 1],
default=1,
help="Whether to remove temporary files. 0 = no, 1 = yes"
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
return parser
|
def get_parser():
# initialize default parameters
param_default = Param()
# Initialize the parser
parser = SCTArgumentParser(
description="Smooth the spinal cord along its centerline. Steps are:\n"
" 1) Spinal cord is straightened (using centerline),\n"
" 2) a Gaussian kernel is applied in the superior-inferior direction,\n"
" 3) then cord is de-straightened as originally.\n"
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Image to smooth. Example: data.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Spinal cord centerline or segmentation. Example: data_centerline.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-smooth',
metavar=Metavar.list,
type=list_type(',', float),
default=[0, 0, 3],
help="Sigma (standard deviation) of the smoothing Gaussian kernel (in mm). For isotropic smoothing you only "
"need to specify a value (e.g. 2). For anisotropic smoothing specify a value for each axis, separated "
"with a comma. The order should follow axes Right-Left, Antero-Posterior, Superior-Inferior "
"(e.g.: 1,1,3). For no smoothing, set value to 0."
)
optional.add_argument(
'-algo-fitting',
metavar=Metavar.str,
choices=['bspline', 'polyfit'],
default=param_default.algo_fitting,
help=f"Algorithm for curve fitting. For more information, see sct_straighten_spinalcord."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help="Output filename. Example: smooth_sc.nii.gz. By default, the suffix '_smooth' will be added to the input file name."),
optional.add_argument(
'-r',
choices=[0, 1],
default=1,
help="Whether to remove temporary files. 0 = no, 1 = yes"
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
return parser
|
25,659 |
def special_methods_callback(app, what, name, obj, skip, options):
"""
Include all method and function docstrings in the docs if they have a custom docstring. This
overrides the default behaviour of omitting special methods or functions from the docs.
"""
if hasattr(obj, '__doc__') and isinstance(obj, (types.FunctionType, types.MethodType)):
return False
return skip
|
def autodoc_skip_member_callback(app, what, name, obj, skip, options):
"""
Include all method and function docstrings in the docs if they have a custom docstring. This
overrides the default behaviour of omitting special methods or functions from the docs.
"""
if hasattr(obj, '__doc__') and isinstance(obj, (types.FunctionType, types.MethodType)):
return False
return skip
|
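The callback above follows the signature of Sphinx's autodoc-skip-member event; a sketch of how it would typically be registered in a conf.py, assuming the function is defined or imported there:
def setup(app):
    # Register the handler so autodoc consults it when deciding whether to skip a member.
    app.connect("autodoc-skip-member", autodoc_skip_member_callback)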
29,785 |
def sanitize_label():
assert sanitize_label("_")
assert sanitize_label("12345A")
assert sanitize_label("12345_A")
assert sanitize_label(12345)
|
def test_sanitize_label():
assert sanitize_label("_")
assert sanitize_label("12345A")
assert sanitize_label("12345_A")
assert sanitize_label(12345)
|
7,228 |
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True,
multichannel=False):
"""Perform total-variation denoising using split-Bregman optimization.
Total-variation denoising (also known as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter ([1]_, [2]_, [3]_, [4]_).
Parameters
----------
image : ndarray
Input data to be denoised (converted using img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the channels dimension.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] https://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
https://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] https://web.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
image = np.atleast_3d(img_as_float(image))
shape_ext = np.asarray(image.shape)
shape_ext[0:2] += 2
out = np.zeros(shape_ext, image.dtype)
if multichannel:
for c in range(image.shape[-1]):
if np.ndim(image) == 3:
channel_in = np.ascontiguousarray(image[..., c, None])
channel_out = np.ascontiguousarray(out[..., c, None])
_denoise_tv_bregman(channel_in, image.dtype.type(weight),
max_iter, eps, isotropic, channel_out)
out[..., c] = channel_out[..., -1]
else:
channel_in = np.ascontiguousarray(image[..., c])
channel_out = np.ascontiguousarray(out[..., c])
_denoise_tv_bregman(channel_in, image.dtype.type(weight),
max_iter, eps, isotropic, channel_out)
out[..., c] = channel_out
else:
image = np.ascontiguousarray(image)
_denoise_tv_bregman(image, image.dtype.type(weight), max_iter, eps,
isotropic, out)
return np.squeeze(out[1:-1, 1:-1])
|
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True,
*, multichannel=False):
"""Perform total-variation denoising using split-Bregman optimization.
Total-variation denoising (also known as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter ([1]_, [2]_, [3]_, [4]_).
Parameters
----------
image : ndarray
Input data to be denoised (converted using img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the channels dimension.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] https://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
https://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] https://web.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
image = np.atleast_3d(img_as_float(image))
shape_ext = np.asarray(image.shape)
shape_ext[0:2] += 2
out = np.zeros(shape_ext, image.dtype)
if multichannel:
for c in range(image.shape[-1]):
if np.ndim(image) == 3:
channel_in = np.ascontiguousarray(image[..., c, None])
channel_out = np.ascontiguousarray(out[..., c, None])
_denoise_tv_bregman(channel_in, image.dtype.type(weight),
max_iter, eps, isotropic, channel_out)
out[..., c] = channel_out[..., -1]
else:
channel_in = np.ascontiguousarray(image[..., c])
channel_out = np.ascontiguousarray(out[..., c])
_denoise_tv_bregman(channel_in, image.dtype.type(weight),
max_iter, eps, isotropic, channel_out)
out[..., c] = channel_out
else:
image = np.ascontiguousarray(image)
_denoise_tv_bregman(image, image.dtype.type(weight), max_iter, eps,
isotropic, out)
return np.squeeze(out[1:-1, 1:-1])
|
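A small usage sketch for the function above, assuming scikit-image and NumPy are installed; the noise level and weight are arbitrary, and the weight convention follows the docstring above (smaller weight means stronger denoising):
import numpy as np
from skimage import data, img_as_float
from skimage.restoration import denoise_tv_bregman

image = img_as_float(data.camera())
rng = np.random.default_rng(0)
noisy = np.clip(image + 0.1 * rng.standard_normal(image.shape), 0, 1)

denoised = denoise_tv_bregman(noisy, weight=0.1)
assert denoised.shape == noisy.shape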
17,333 |
def list_equiv(first, second):
equiv = True
if len(first) != len(second):
return False
else:
for i in range(len(first)):
equiv = equiv and equivalent(first[i], second[i])
return equiv
|
def list_equiv(first, second):
equiv = True
if len(first) != len(second):
return False
else:
for i in range(len(first)):
equiv = equiv and equivalent(f, s)
return equiv
|
46,627 |
def test_simple_gp_model_predict():
x = np.linspace(-1, 1, 3)[:, None]
y = x **2
gp = SimpleGaussianProcessModel(x, y)
mean, var = gp.predict(np.array(5, 1))
assert mean.shape == (5, 1)
assert var.shape == (5, 1)
|
def test_simple_gp_model_predict():
x = np.linspace(-1, 1, 3)[:, None]
y = x**2
gp = SimpleGaussianProcessModel(x, y)
mean, var = gp.predict(np.array(5, 1))
assert mean.shape == (5, 1)
assert var.shape == (5, 1)
|
45,630 |
def colors_array_track(data):
"""Replaces the "stain" provided by the dataset, with a RGB color that the
circos graph can display.
"""
data['color'] = data['color'].ffill()
data['color'] = data['color'].map(
{
"gneg": "rgb(173,6,6)",
"acen": "rgb(130,130,130)",
"n/a": "rgb(255,127,80)",
"gpos25": "rgb(153, 204, 255)",
"gpos100": "rgb(153, 255, 102)",
"gpos75": "rgb(102, 51, 0)",
"gpos50": "rgb(255, 0, 255)",
"gvar": "rgb(204, 153, 0)"
}
)
return data
|
def colors_array_track(data):
"""Replace the "stain" provided by the dataset with a RGB color that the
circos graph can display.
"""
data['color'] = data['color'].ffill()
data['color'] = data['color'].map(
{
"gneg": "rgb(173,6,6)",
"acen": "rgb(130,130,130)",
"n/a": "rgb(255,127,80)",
"gpos25": "rgb(153, 204, 255)",
"gpos100": "rgb(153, 255, 102)",
"gpos75": "rgb(102, 51, 0)",
"gpos50": "rgb(255, 0, 255)",
"gvar": "rgb(204, 153, 0)"
}
)
return data
|
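A toy sketch of the ffill-then-map pattern used above, assuming pandas is installed; the frame below is made up and only a few stain values are mapped:
import numpy as np
import pandas as pd

data = pd.DataFrame({"color": ["gneg", np.nan, "acen", np.nan, "gvar"]})
data["color"] = data["color"].ffill()        # propagate the last seen stain into gaps
data["color"] = data["color"].map({
    "gneg": "rgb(173,6,6)",
    "acen": "rgb(130,130,130)",
    "gvar": "rgb(204, 153, 0)",
})                                           # any stain missing from the dict becomes NaN
print(data["color"].tolist())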
3,920 |
def total_spanning_tree_weight(G, weight=None):
"""
Apply Kirchhoff's Tree Matrix Theorem to a graph in order to find the total
weight of all spanning trees.
The theorem states that the determinant of any cofactor of the Laplacian
matrix of a graph is the number of spanning trees in the graph. For a
weighted Laplacian matrix, it is the sum across all spanning trees of the
multiplicative weight of each tree. That is, the weight of each tree is the
product of its edge weights.
Parameters
----------
G : NetworkX Graph
The graph to use Kirchhoff's theorem on.
weight : string or None
The key for the edge attribute holding the edge weight. If `None`, then
each edge is assumed to have a weight of 1 and this function returns the
total number of spanning trees in `G`.
Returns
-------
float
The sum of the total multiplicative weight for all spanning trees in the
graph.
"""
import numpy as np
G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray()
# Determinant ignoring first row and column
return abs(np.linalg.det(G_laplacian[1:, 1:]))
|
def total_spanning_tree_weight(G, weight=None):
"""
Apply Kirchhoff's Tree Matrix Theorem to a graph in order to find the total
weight of all spanning trees.
The theorem states that the determinant of any cofactor of the Laplacian
matrix of a graph is the number of spanning trees in the graph. For a
weighted Laplacian matrix, it is the sum across all spanning trees of the
multiplicative weight of each tree. That is, the weight of each tree is the
product of its edge weights.
Parameters
----------
G : NetworkX Graph
The graph to use Kirchhoff's theorem on.
weight : string or None
The key for the edge attribute holding the edge weight. If `None`, then
each edge is assumed to have a weight of 1 and this function returns the
total number of spanning trees in `G`.
Returns
-------
float
The sum of the total multiplicative weights for all spanning trees in `G`
"""
import numpy as np
G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray()
# Determinant ignoring first row and column
return abs(np.linalg.det(G_laplacian[1:, 1:]))
|
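As a quick sanity check of the cofactor-determinant formula above: by Cayley's formula the complete graph K4 has 4**(4-2) = 16 spanning trees. A minimal sketch assuming networkx and NumPy are installed:
import networkx as nx
import numpy as np

G = nx.complete_graph(4)
L = nx.laplacian_matrix(G).toarray()
n_trees = abs(np.linalg.det(L[1:, 1:]))   # determinant of a cofactor of the Laplacian
assert round(n_trees) == 16               # matches Cayley's formula n**(n-2)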
42,007 |
def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> Dict[complex, Union[int, float]]:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
zmap, _ = _run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
zmatrix, max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
|
def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> Dict[complex, Union[int, float]]:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
_run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
zmatrix, max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
|
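A simplified, self-contained illustration of the "repeatedly average the existing nearest neighbors" idea referenced in the comments above (this is not Optuna's actual helper; the grid and iteration count are made up):
import numpy as np

z = np.array([[1.0, np.nan, 3.0],
              [np.nan, np.nan, np.nan],
              [7.0, np.nan, 9.0]])
missing = np.isnan(z)
z[missing] = np.nanmean(z)                       # crude starting guess for the holes
for _ in range(200):
    padded = np.pad(z, 1, mode="edge")           # zero-derivative boundary condition
    neighbor_avg = (padded[:-2, 1:-1] + padded[2:, 1:-1] +
                    padded[1:-1, :-2] + padded[1:-1, 2:]) / 4.0
    z[missing] = neighbor_avg[missing]           # only the originally-missing cells move
print(np.round(z, 2))                            # the centre settles at 5.0 by symmetry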
53,837 |
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
num_results_type = CLIArgumentType(
default=5000, help='Specifies the maximum number of results to return. Provide "*" to return all.',
validator=validate_storage_data_plane_list)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account.'
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
'not support wildcard characters (*). Checks relative path prefix. For example:'
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
is_preview=True, min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), is_preview=True, min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), is_preview=True, min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since',
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
c.argument('if_unmodified_since',
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']: # pylint: disable=line-too-long
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind = self.get_models('AccountType', 'SkuName', 'Kind',
resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicates the type of storage account.', min_api="2018-02-01",
arg_type=get_enum_type(t_kind), default='StorageV2')
c.argument('kind', help='Indicates the type of storage account.', max_api="2017-10-01",
arg_type=get_enum_type(t_kind), default='Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns'],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
c.register_common_storage_account_options()
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
with self.argument_context('storage account update', arg_group='Customer managed key', min_api='2017-06-01') as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
arg_group='Network Rule') as c:
t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(t_default_action),
help='Default action to apply when no rule matches.')
with self.argument_context('storage account show-connection-string') as c:
c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
c.argument('sas_token', help='The SAS token to be used in the connection-string.')
c.argument('key_name', options_list=['--key'], help='The key to use.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
for item in ['blob', 'file', 'queue', 'table']:
c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
with self.argument_context('storage account encryption-scope') as c:
c.argument('account_name', help='The storage account name.')
c.argument('resource_group_name', validator=process_resource_group, required=False)
c.argument('encryption_scope_name', options_list=['--name', '-n'],
help='The name of the encryption scope within the specified storage account.')
for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import validate_encryption_key
t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('key_uri', options_list=['-u', '--key-uri'],
help='The object identifier for a key vault key object. When applied, the encryption scope will '
'use the key referenced by the identifier to enable customer-managed key support on this '
'encryption scope.')
with self.argument_context('storage account encryption-scope update') as c:
t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('state', arg_type=get_enum_type(t_state),
help='Change the state the encryption scope. When disabled, '
'all blob read/write operations using this encryption scope will fail.')
with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)
with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account management-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.')
c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account management-policy update') as c:
c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account keys list') as c:
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account network-rule') as c:
from ._validators import validate_subnet
c.argument('account_name', acct_name_type, id_part=None)
c.argument('ip_address', help='IPv4 address or CIDR range.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
c.argument('action', help='The action of virtual network rule.')
with self.argument_context('storage account blob-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01')
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2018-07-01')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validator_delete_retention_days, min_api='2018-07-01')
c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
c.argument('restore_days', type=int, arg_group='Restore Policy',
min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
"than zero and less than Delete Retention Days.")
c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
min_api='2019-06-01')
with self.argument_context('storage account file-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account file-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2019-06-01', help='Enable file service properties for share soft delete.')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validate_delete_retention_days, min_api='2019-06-01',
                   help='Indicate the number of days that the deleted item should be retained. The minimum specified '
'value can be 1 and the maximum value can be 365.')
with self.argument_context('storage account generate-sas') as c:
t_account_permissions = self.get_sdk('common.models#AccountPermissions')
c.register_sas_arguments()
c.argument('services', type=services_type(self))
c.argument('resource_types', type=resource_type_type(self))
c.argument('expiry', type=get_datetime_type(True))
c.argument('start', type=get_datetime_type(True))
c.argument('account_name', acct_name_type, options_list=['--account-name'])
c.argument('permission', options_list=('--permissions',),
help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
get_permission_help_string(t_account_permissions)),
validator=get_permission_validator(t_account_permissions))
c.ignore('sas_token')
for item in ['show', 'off']:
with self.argument_context('storage logging {}'.format(item)) as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')
with self.argument_context('storage logging update') as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
required=True)
c.argument('log', validator=get_char_options_validator('rwd', 'log'))
c.argument('retention', type=int)
c.argument('version', type=float, validator=validate_logging_version)
with self.argument_context('storage metrics show') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))
with self.argument_context('storage metrics update') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
required=True)
c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
c.argument('minute', arg_type=get_enum_type(['true', 'false']))
c.argument('api', arg_type=get_enum_type(['true', 'false']))
c.argument('retention', type=int)
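    # storage blob: blob data-plane arguments (list, generate-sas, restore, set-tier, upload/download, delete, lease)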
with self.argument_context('storage blob') as c:
c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.argument('destination_path', help='The destination path that will be appended to the blob name.')
with self.argument_context('storage blob list') as c:
c.argument('include', validator=validate_included_datasets)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage blob generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
c.register_sas_arguments()
        c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
                   'accessed using this shared access signature.')
        c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
                   'using this shared access signature.')
c.argument('full_uri', action='store_true',
                   help='Indicates that this command returns the full blob URI and the shared access signature token.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_blob_permissions)),
validator=get_permission_validator(t_blob_permissions))
c.ignore('sas_token')
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import BlobRangeAddAction
c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
                   help='Blob ranges to restore. You need two values to specify start_range and end_range for each '
                        'blob range, e.g. -r blob1 blob2. Note: an empty string means the account start as the start '
                        'range value, and the account end as the end range value.')
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
with self.argument_context('storage blob update') as c:
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=True)
with self.argument_context('storage blob exists') as c:
c.argument('blob_name', required=True)
with self.argument_context('storage blob url') as c:
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
        c.argument('snapshot', help='A string value that uniquely identifies the snapshot. The value of '
                                    'this query parameter indicates the snapshot version.')
with self.argument_context('storage blob set-tier') as c:
from azure.cli.command_modules.storage._validators import (blob_tier_validator,
blob_rehydrate_priority_validator)
c.argument('container_name', container_name_type)
c.argument('blob_name', options_list=('--name', '-n'), help="The blob name")
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
c.argument('tier', validator=blob_tier_validator)
c.argument('timeout', type=int)
c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator,
help="Indicates the priority with which to rehydrate an archived blob. "
"The priority can be set on a blob only once, default value is Standard.")
with self.argument_context('storage blob service-properties delete-policy update') as c:
c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
c.argument('days_retained', type=int,
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
help='Enables soft-delete.')
c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
help='Enables static-website.')
c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
arg_group='Static Website')
c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
help='Represents the path to the error document that should be shown when an error 404 is issued,'
' in other words, when a browser requests a page that does not exist.')
with self.argument_context('storage blob show') as c:
c.argument('lease_id', help='Required if the blob has an active lease.')
c.argument('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
'specifies the blob snapshot to retrieve.')
c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag matches the value specified.")
c.argument('if_none_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag does not match the value specified."
" Specify the wildcard character (*) to perform the operation only if the "
"resource does not exist, and fail the operation if it does exist.")
with self.argument_context('storage blob upload') as c:
from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
from .sdkutil import get_blob_types, get_blob_tier_names
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False)
c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
c.argument('max_connections', type=int)
c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
arg_type=get_enum_type(get_blob_types()))
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
# TODO: Remove once #807 is complete. Smart Create Generation requires this parameter.
# register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',),
# help=argparse.SUPPRESS)
c.argument('tier', validator=page_blob_tier_validator,
arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
min_api='2017-04-17')
c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
help='A predefined encryption scope used to encrypt the data on the service.')
with self.argument_context('storage blob upload-batch') as c:
from .sdkutil import get_blob_types
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
c.ignore('source_files', 'destination_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
c.argument('maxsize_condition', arg_group='Content Control')
c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download') as c:
c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download-batch') as c:
c.ignore('source_container_name')
c.argument('destination', options_list=('--destination', '-d'))
c.argument('source', options_list=('--source', '-s'))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
with self.argument_context('storage blob delete') as c:
from .sdkutil import get_delete_blob_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))
with self.argument_context('storage blob delete-batch') as c:
c.ignore('source_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
help='Required if the blob has associated snapshots.')
c.argument('lease_id', help='The active lease id for the blob.')
with self.argument_context('storage blob lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
c.argument('blob_name', arg_type=blob_name_type)
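    # storage copy: azcopy-backed cross-service copy arguments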
with self.argument_context('storage copy') as c:
c.argument('destination', options_list=['--destination', '-d'], help="The path/url of copy destination. "
"It can be a local path, an url to azure storage server. If you provide destination parameter "
"here, you do not need to provide arguments in copy destination arguments group and copy "
"destination arguments will be deprecated in future.")
c.argument('source', options_list=['--source', '-s'], help="The path/url of copy source. It can be a local"
" path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here,"
" you do not need to provide arguments in copy source arguments group and copy source arguments"
" will be deprecated in future.")
for item in ['destination', 'source']:
c.argument('{}_account_name'.format(item), arg_group='Copy {}'.format(item),
help='Storage account name of copy {}'.format(item))
c.argument('{}_container'.format(item), arg_group='Copy {}'.format(item),
help='Container name of copy {} storage account'.format(item))
c.argument('{}_blob'.format(item), arg_group='Copy {}'.format(item),
help='Blob name in blob container of copy {} storage account'.format(item))
c.argument('{}_share'.format(item), arg_group='Copy {}'.format(item),
help='File share name of copy {} storage account'.format(item))
c.argument('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
help='File path in file share of copy {} storage account'.format(item))
c.argument('{}_local_path'.format(item), arg_group='Copy {}'.format(item),
help='Local file path')
c.argument('put_md5', arg_group='Additional Flags', action='store_true',
help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
                   'destination blob/file. Only available when uploading.')
c.argument('blob_type', arg_group='Additional Flags',
arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
help='The type of blob at the destination.')
c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
help='Preserve access tier during service to service copy. '
'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers '
'to ensure destination storage account support setting access tier. In the cases that setting '
'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
'access tier. (Default true)')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
help='Follow symbolic links when uploading from local file system.')
with self.argument_context('storage blob copy') as c:
for item in ['destination', 'source']:
c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition')
c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition')
c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
                   help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_source_uri_arguments(validator=validate_source_uri)
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage blob incremental-copy start') as c:
from azure.cli.command_modules.storage._validators import process_blob_source_uri
c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
c.argument('destination_if_modified_since', arg_group='Pre-condition')
c.argument('destination_if_unmodified_since', arg_group='Pre-condition')
c.argument('destination_if_match', arg_group='Pre-condition')
c.argument('destination_if_none_match', arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
                   help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob sync') as c:
c.extra('destination_container', options_list=['--container', '-c'], required=True,
help='The sync destination container.')
c.extra('destination_path', options_list=['--destination', '-d'],
validator=validate_azcopy_upload_destination_url,
help='The sync destination path.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to sync from.')
c.ignore('destination')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
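    # storage container: container create/delete, immutability, policy, generate-sas and lease arguments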
with self.argument_context('storage container') as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'))
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specifies whether data in the container may be accessed publicly.')
with self.argument_context('storage container create') as c:
c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', is_preview=True,
help='Default the container to use specified encryption scope for all writes.')
c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
                   'will block a container from being deleted if it has an immutability policy. Specifying this will '
'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
"and matches this ID. Required if the container has an active lease.")
c.ignore('processed_resource_group')
c.ignore('processed_account_name')
c.ignore('mgmt_client')
with self.argument_context('storage container exists') as c:
c.ignore('blob_name', 'snapshot')
with self.argument_context('storage container immutability-policy') as c:
c.argument('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
arg_type=get_three_state_flag())
with self.argument_context('storage container list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage container set-permission') as c:
c.ignore('signed_identifiers')
with self.argument_context('storage container lease') as c:
c.argument('container_name', container_name_type)
with self.argument_context('storage container') as c:
c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage container immutability-policy') as c:
c.argument('immutability_period_since_creation_in_days', options_list='--period')
c.argument('container_name', container_name_type)
with self.argument_context('storage container legal-hold') as c:
c.argument('container_name', container_name_type)
c.argument('tags', nargs='+',
help='Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case')
with self.argument_context('storage container policy') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.argument('container_name', container_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_container_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
for item in ['create', 'delete', 'list', 'show', 'update']:
with self.argument_context('storage container policy {}'.format(item)) as c:
c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
with self.argument_context('storage container generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_container_permissions)),
validator=get_permission_validator(t_container_permissions))
        c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
                   'accessed using this shared access signature.')
        c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
                   'using this shared access signature.')
        c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
                   'using this shared access signature.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.ignore('sas_token')
with self.argument_context('storage container lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
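    # storage share / share-rm: file share management arguments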
with self.argument_context('storage share') as c:
c.argument('share_name', share_name_type, options_list=('--name', '-n'))
for item in ['create', 'delete', 'exists', 'list', 'show', 'update']:
with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('resource_group_name', required=False)
c.argument('account_name', storage_account_type)
c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('share_quota', type=int, options_list='--quota')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs that is associated with the share. '
'This overwrites any existing metadata',
validator=validate_metadata)
c.ignore('filter', 'maxpagesize', 'skip_token')
with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
with self.argument_context('storage share url') as c:
c.argument('unc', action='store_true', help='Output UNC network path.')
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage share list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage share exists') as c:
c.ignore('directory_name', 'file_name')
with self.argument_context('storage share policy') as c:
from .completers import get_storage_acl_name_completion_list
t_file_svc = self.get_sdk('file#FileService')
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.argument('container_name', share_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_share_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage share delete') as c:
from .sdkutil import get_delete_file_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
help='Specify the deletion strategy when the share has snapshots.')
with self.argument_context('storage share generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_share_permissions)),
validator=get_permission_validator(t_share_permissions))
c.ignore('sas_token')
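    # storage directory / storage file: Azure Files data-plane arguments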
with self.argument_context('storage directory') as c:
c.argument('directory_name', directory_type, options_list=('--name', '-n'))
with self.argument_context('storage directory exists') as c:
c.ignore('file_name')
c.argument('directory_name', required=True)
with self.argument_context('storage file') as c:
c.argument('file_name', file_name_type, options_list=('--name', '-n'))
c.argument('directory_name', directory_type, required=False)
with self.argument_context('storage file copy') as c:
c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
help='Name of the destination share. The share must exist.')
with self.argument_context('storage file copy cancel') as c:
c.register_path_argument(options_list=('--destination-path', '-p'))
with self.argument_context('storage file delete') as c:
c.register_path_argument()
with self.argument_context('storage file download') as c:
c.register_path_argument()
c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
help='Path of the file to write to. The source filename will be used if not specified.',
validator=process_file_download_namespace, completer=FilesCompleter())
c.argument('path', validator=None) # validator called manually from process_file_download_namespace
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
with self.argument_context('storage file exists') as c:
c.register_path_argument()
with self.argument_context('storage file generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_path_argument()
c.register_sas_arguments()
t_file_svc = self.get_sdk('file.fileservice#FileService')
t_file_permissions = self.get_sdk('file.models#FilePermissions')
c.argument('id', options_list='--policy-name',
                   help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_file_permissions)),
validator=get_permission_validator(t_file_permissions))
c.ignore('sas_token')
with self.argument_context('storage file list') as c:
from .completers import dir_path_completer
c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
completer=dir_path_completer)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage file metadata show') as c:
c.register_path_argument()
with self.argument_context('storage file metadata update') as c:
c.register_path_argument()
with self.argument_context('storage file resize') as c:
c.register_path_argument()
c.argument('content_length', options_list='--size')
with self.argument_context('storage file show') as c:
c.register_path_argument()
with self.argument_context('storage file update') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument()
c.register_content_settings_argument(t_file_content_settings, update=True)
with self.argument_context('storage file upload') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument(default_file_param='local_file_path')
c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
with self.argument_context('storage file url') as c:
c.register_path_argument()
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
    with self.argument_context('storage file upload-batch') as c:
        from ._validators import process_file_upload_batch_parameters
        t_file_content_settings = self.get_sdk('file.models#ContentSettings')
        c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
c.extra('no_progress', progress_type)
with self.argument_context('storage file download-batch') as c:
from ._validators import process_file_download_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
with self.argument_context('storage file delete-batch') as c:
from ._validators import process_file_batch_source_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
with self.argument_context('storage file copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_path_argument(options_list=('--destination-path', '-p'))
c.register_source_uri_arguments(validator=validate_source_uri)
c.extra('file_snapshot', default=None, arg_group='Copy Source',
help='The file snapshot for the source storage account.')
with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
from ._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
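    # storage cors: CORS rules across blob/file/queue/table services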
with self.argument_context('storage cors list') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
options_list='--services', required=False)
with self.argument_context('storage cors add') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
c.argument('max_age')
c.argument('origins', nargs='+')
c.argument('methods', nargs='+',
arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
c.argument('allowed_headers', nargs='+')
c.argument('exposed_headers', nargs='+')
with self.argument_context('storage cors clear') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
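    # storage queue / storage message: queue service arguments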
with self.argument_context('storage queue generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
                   help='The name of a stored access policy within the queue\'s ACL.',
completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_queue_permissions)),
validator=get_permission_validator(t_queue_permissions))
c.ignore('sas_token')
c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue create') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.argument('container_name', queue_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_queue_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage message') as c:
c.argument('queue_name', queue_name_type)
c.argument('message_id', options_list='--id')
c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
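    # storage remove: azcopy-backed delete for blobs and files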
with self.argument_context('storage remove') as c:
from .completers import file_path_completer
c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.extra('share_name', share_name_type, help='The file share name.')
c.extra('path', options_list=('--path', '-p'),
help='The path to the file within the file share.',
completer=file_path_completer)
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.ignore('destination')
c.ignore('service')
c.ignore('target')
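    # storage table / storage entity: table service arguments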
with self.argument_context('storage table') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
from ._validators import table_permission_validator
from .completers import get_storage_acl_name_completion_list
c.argument('container_name', table_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the table\'s ACL.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
validator=table_permission_validator)
c.ignore('sas_token')
with self.argument_context('storage entity') as c:
c.ignore('property_resolver')
c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
c.argument('select', nargs='+', validator=validate_select,
help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
c.argument('accept', default='minimal', validator=validate_table_payload_format,
arg_type=get_enum_type(['none', 'minimal', 'full']),
help='Specifies how much metadata to include in the response payload.')
c.argument('marker', validator=validate_marker, nargs='+')
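    # storage fs: ADLS Gen2 (hierarchical namespace) file system, directory, file and access arguments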
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
with self.argument_context('storage fs {}'.format(item)) as c:
c.extra('file_system_name', options_list=['--name', '-n'],
help="File system name.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs create') as c:
from .sdkutil import get_fs_access_type_names
c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
validator=validate_fs_public_access,
help="Specify whether data in the file system may be accessed publicly and the level of access.")
with self.argument_context('storage fs list') as c:
c.argument('include_metadata', arg_type=get_three_state_flag(),
help='Specify that file system metadata be returned in the response. The default value is "False".')
c.argument('name_starts_with', options_list=['--prefix'],
help='Filter the results to return only file systems whose names begin with the specified prefix.')
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
with self.argument_context('storage fs directory {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.extra('directory_path', options_list=['--name', '-n'],
help="The name of directory.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs directory create') as c:
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
with self.argument_context('storage fs directory list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('path', help="Filter the results to return only paths under the specified path.")
c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
with self.argument_context('storage fs directory move') as c:
c.argument('new_name', options_list=['--new-directory', '-d'],
help='The new directory name the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}".')
with self.argument_context('storage fs file list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('exclude_dir', action='store_true',
help='List only files in the given file system.')
c.argument('path', help='Filter the results to return only paths under the specified path.')
c.argument('num_results', type=int, default=5000,
help='Specify the maximum number of results to return. If the request does not specify num_results '
'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
c.argument('marker',
help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
'previous generator object. If specified, this generator will begin returning results from this '
'point.')
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'show', 'metadata update',
'metadata show']:
with self.argument_context('storage fs file {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
required=True)
c.extra('timeout', timeout_type)
c.argument('content', help='Content to be appended to file.')
with self.argument_context('storage fs file create') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs file download') as c:
c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
help='The local file where the file or folder will be downloaded to. The source filename will be '
'used if not specified.')
c.argument('overwrite', arg_type=get_three_state_flag(),
help="Overwrite an existing file when specified. Default value is false.")
with self.argument_context('storage fs file move') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], required=True,
help="The original file path users want to move in a file system.")
c.argument('new_name', options_list=['--new-path'],
help='The new path the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}/{file}".')
with self.argument_context('storage fs file upload') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.argument('local_path', options_list=['--source', '-s'],
help='Path of the local file to upload as the file content.')
c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
c.argument('if_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag matches the value specified.")
c.argument('if_none_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag does not match the value specified.")
c.argument('if_modified_since', arg_group='Precondition',
help="A Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('if_unmodified_since', arg_group='Precondition',
help="A Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('permissions', permissions_type)
c.argument('umask', umask_type)
for item in ['set', 'show']:
with self.argument_context('storage fs access {}'.format(item)) as c:
from ._validators import validate_access_control
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('directory_path', options_list=['-p', '--path'],
help='The path to a file or directory in the specified file system.', required=True)
c.argument('permissions', validator=validate_access_control)
c.ignore('upn')
|
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
num_results_type = CLIArgumentType(
default=5000, help='Specifies the maximum number of results to return. Provide "*" to return all.',
validator=validate_storage_data_plane_list)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account. '
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
'not support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
is_preview=True, min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), is_preview=True, min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), is_preview=True, min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since',
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
c.argument('if_unmodified_since',
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']: # pylint: disable=line-too-long
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
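# The block below registers the parameters backing `az storage account create`. An illustrative
# invocation (resource names are placeholders):
#   az storage account create -n mystorageaccount -g mygroup -l westus --sku Standard_LRS --kind StorageV2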
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind = self.get_models('AccountType', 'SkuName', 'Kind',
resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicates the type of storage account.', min_api="2018-02-01",
arg_type=get_enum_type(t_kind), default='StorageV2')
c.argument('kind', help='Indicates the type of storage account.', max_api="2017-10-01",
arg_type=get_enum_type(t_kind), default='Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns'],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
c.register_common_storage_account_options()
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
with self.argument_context('storage account update', arg_group='Customer managed key', min_api='2017-06-01') as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
arg_group='Network Rule') as c:
t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(t_default_action),
help='Default action to apply when no rule matches.')
with self.argument_context('storage account show-connection-string') as c:
c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
c.argument('sas_token', help='The SAS token to be used in the connection-string.')
c.argument('key_name', options_list=['--key'], help='The key to use.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
for item in ['blob', 'file', 'queue', 'table']:
c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
with self.argument_context('storage account encryption-scope') as c:
c.argument('account_name', help='The storage account name.')
c.argument('resource_group_name', validator=process_resource_group, required=False)
c.argument('encryption_scope_name', options_list=['--name', '-n'],
help='The name of the encryption scope within the specified storage account.')
for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import validate_encryption_key
t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('key_uri', options_list=['-u', '--key-uri'],
help='The object identifier for a key vault key object. When applied, the encryption scope will '
'use the key referenced by the identifier to enable customer-managed key support on this '
'encryption scope.')
with self.argument_context('storage account encryption-scope update') as c:
t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('state', arg_type=get_enum_type(t_state),
help='Change the state of the encryption scope. When disabled, '
'all blob read/write operations using this encryption scope will fail.')
with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)
with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account management-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.')
c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account management-policy update') as c:
c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account keys list') as c:
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account network-rule') as c:
from ._validators import validate_subnet
c.argument('account_name', acct_name_type, id_part=None)
c.argument('ip_address', help='IPv4 address or CIDR range.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
c.argument('action', help='The action of virtual network rule.')
with self.argument_context('storage account blob-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01')
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2018-07-01')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validator_delete_retention_days, min_api='2018-07-01')
c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
c.argument('restore_days', type=int, arg_group='Restore Policy',
min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
"than zero and less than Delete Retention Days.")
c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
min_api='2019-06-01')
with self.argument_context('storage account file-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account file-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2019-06-01', help='Enable file service properties for share soft delete.')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validate_delete_retention_days, min_api='2019-06-01',
help=' Indicate the number of days that the deleted item should be retained. The minimum specified '
'value can be 1 and the maximum value can be 365.')
with self.argument_context('storage account generate-sas') as c:
t_account_permissions = self.get_sdk('common.models#AccountPermissions')
c.register_sas_arguments()
c.argument('services', type=services_type(self))
c.argument('resource_types', type=resource_type_type(self))
c.argument('expiry', type=get_datetime_type(True))
c.argument('start', type=get_datetime_type(True))
c.argument('account_name', acct_name_type, options_list=['--account-name'])
c.argument('permission', options_list=('--permissions',),
help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
get_permission_help_string(t_account_permissions)),
validator=get_permission_validator(t_account_permissions))
c.ignore('sas_token')
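# Illustrative account-level SAS request using the arguments registered above (all values are placeholders):
#   az storage account generate-sas --account-name mystorageaccount --services b --resource-types sco --permissions rl --expiry 2030-01-01T00:00Z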
for item in ['show', 'off']:
with self.argument_context('storage logging {}'.format(item)) as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')
with self.argument_context('storage logging update') as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
required=True)
c.argument('log', validator=get_char_options_validator('rwd', 'log'))
c.argument('retention', type=int)
c.argument('version', type=float, validator=validate_logging_version)
with self.argument_context('storage metrics show') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))
with self.argument_context('storage metrics update') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
required=True)
c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
c.argument('minute', arg_type=get_enum_type(['true', 'false']))
c.argument('api', arg_type=get_enum_type(['true', 'false']))
c.argument('retention', type=int)
with self.argument_context('storage blob') as c:
c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.argument('destination_path', help='The destination path that will be appended to the blob name.')
with self.argument_context('storage blob list') as c:
c.argument('include', validator=validate_included_datasets)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage blob generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
c.register_sas_arguments()
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
'accessed using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
'using this shared access signature.')
c.argument('full_uri', action='store_true',
help='Indicates that this command returns the full blob URI and the shared access signature token.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command returns the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified.")
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_blob_permissions)),
validator=get_permission_validator(t_blob_permissions))
c.ignore('sas_token')
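# Illustrative blob SAS request using the arguments registered above; `--as-user` requires
# `--auth-mode login` (container and blob names are placeholders):
#   az storage blob generate-sas -c mycontainer -n myblob.txt --permissions r --expiry 2030-01-01T00:00Z --auth-mode login --as-user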
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import BlobRangeAddAction
c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
help='Blob ranges to restore. You need two values to specify start_range and end_range for each '
'blob range, e.g. -r blob1 blob2. Note: an empty value means the account start for the start '
'range, and the account end for the end range.')
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
with self.argument_context('storage blob update') as c:
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=True)
with self.argument_context('storage blob exists') as c:
c.argument('blob_name', required=True)
with self.argument_context('storage blob url') as c:
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
c.argument('snapshot', help='A string value that uniquely identifies the snapshot. The value of '
'this query parameter indicates the snapshot version.')
with self.argument_context('storage blob set-tier') as c:
from azure.cli.command_modules.storage._validators import (blob_tier_validator,
blob_rehydrate_priority_validator)
c.argument('container_name', container_name_type)
c.argument('blob_name', options_list=('--name', '-n'), help="The blob name")
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
c.argument('tier', validator=blob_tier_validator)
c.argument('timeout', type=int)
c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator, is_preview=True,
help="Indicate the priority with which to rehydrate an archived blob. "
"The priority can be set on a blob only once, default value is Standard.")
with self.argument_context('storage blob service-properties delete-policy update') as c:
c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
c.argument('days_retained', type=int,
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
help='Enables soft-delete.')
c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
help='Enables static-website.')
c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
arg_group='Static Website')
c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
help='Represents the path to the error document that should be shown when an error 404 is issued,'
' in other words, when a browser requests a page that does not exist.')
with self.argument_context('storage blob show') as c:
c.argument('lease_id', help='Required if the blob has an active lease.')
c.argument('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
'specifies the blob snapshot to retrieve.')
c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag matches the value specified.")
c.argument('if_none_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag does not match the value specified."
" Specify the wildcard character (*) to perform the operation only if the "
"resource does not exist, and fail the operation if it does exist.")
with self.argument_context('storage blob upload') as c:
from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
from .sdkutil import get_blob_types, get_blob_tier_names
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False)
c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
c.argument('max_connections', type=int)
c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
arg_type=get_enum_type(get_blob_types()))
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
# TODO: Remove once #807 is complete. Smart Create Generation requires this parameter.
# register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',),
# help=argparse.SUPPRESS)
c.argument('tier', validator=page_blob_tier_validator,
arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
min_api='2017-04-17')
c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
help='A predefined encryption scope used to encrypt the data on the service.')
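# Illustrative upload using the arguments registered above (file and blob names are placeholders):
#   az storage blob upload -c mycontainer -f ./report.csv -n reports/report.csv --type block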
with self.argument_context('storage blob upload-batch') as c:
from .sdkutil import get_blob_types
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
c.ignore('source_files', 'destination_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
c.argument('maxsize_condition', arg_group='Content Control')
c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download') as c:
c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download-batch') as c:
c.ignore('source_container_name')
c.argument('destination', options_list=('--destination', '-d'))
c.argument('source', options_list=('--source', '-s'))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
with self.argument_context('storage blob delete') as c:
from .sdkutil import get_delete_blob_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))
with self.argument_context('storage blob delete-batch') as c:
c.ignore('source_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
help='Required if the blob has associated snapshots.')
c.argument('lease_id', help='The active lease id for the blob.')
with self.argument_context('storage blob lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
c.argument('blob_name', arg_type=blob_name_type)
with self.argument_context('storage copy') as c:
c.argument('destination', options_list=['--destination', '-d'], help="The path/URL of the copy destination. "
"It can be a local path or a URL to an Azure storage service. If you provide the destination "
"parameter here, you do not need to provide arguments in the copy destination arguments group; "
"the copy destination arguments will be deprecated in the future.")
c.argument('source', options_list=['--source', '-s'], help="The path/URL of the copy source. It can be a local"
" path, a URL to an Azure storage service or an AWS S3 bucket. If you provide the source parameter"
" here, you do not need to provide arguments in the copy source arguments group; the copy source"
" arguments will be deprecated in the future.")
for item in ['destination', 'source']:
c.argument('{}_account_name'.format(item), arg_group='Copy {}'.format(item),
help='Storage account name of copy {}'.format(item))
c.argument('{}_container'.format(item), arg_group='Copy {}'.format(item),
help='Container name of copy {} storage account'.format(item))
c.argument('{}_blob'.format(item), arg_group='Copy {}'.format(item),
help='Blob name in blob container of copy {} storage account'.format(item))
c.argument('{}_share'.format(item), arg_group='Copy {}'.format(item),
help='File share name of copy {} storage account'.format(item))
c.argument('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
help='File path in file share of copy {} storage account'.format(item))
c.argument('{}_local_path'.format(item), arg_group='Copy {}'.format(item),
help='Local file path')
c.argument('put_md5', arg_group='Additional Flags', action='store_true',
help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
'destination blob/file. Only available when uploading.')
c.argument('blob_type', arg_group='Additional Flags',
arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
help='The type of blob at the destination.')
c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
help='Preserve access tier during service to service copy. '
'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers '
'to ensure the destination storage account supports setting the access tier. In cases where setting '
'the access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
'the access tier. (Default true)')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
help='Follow symbolic links when uploading from local file system.')
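# Illustrative azcopy-backed copy using the arguments registered above (URL and local path are placeholders):
#   az storage copy -s ./local-folder -d https://mystorageaccount.blob.core.windows.net/mycontainer --recursive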
with self.argument_context('storage blob copy') as c:
for item in ['destination', 'source']:
c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition')
c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition')
c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_source_uri_arguments(validator=validate_source_uri)
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage blob incremental-copy start') as c:
from azure.cli.command_modules.storage._validators import process_blob_source_uri
c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
c.argument('destination_if_modified_since', arg_group='Pre-condition')
c.argument('destination_if_unmodified_since', arg_group='Pre-condition')
c.argument('destination_if_match', arg_group='Pre-condition')
c.argument('destination_if_none_match', arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob sync') as c:
c.extra('destination_container', options_list=['--container', '-c'], required=True,
help='The sync destination container.')
c.extra('destination_path', options_list=['--destination', '-d'],
validator=validate_azcopy_upload_destination_url,
help='The sync destination path.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to sync from.')
c.ignore('destination')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
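# Illustrative sync of a local folder into a container using the arguments registered above (placeholders):
#   az storage blob sync -c mycontainer --account-name mystorageaccount -s ./local-folder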
with self.argument_context('storage container') as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'))
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specifies whether data in the container may be accessed publicly.')
with self.argument_context('storage container create') as c:
c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', is_preview=True,
help='Default the container to use specified encryption scope for all writes.')
c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
'will block a container from being deleted if it has an immutability policy. Specifying this will '
'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
"and matches this ID. Required if the container has an active lease.")
c.ignore('processed_resource_group')
c.ignore('processed_account_name')
c.ignore('mgmt_client')
with self.argument_context('storage container exists') as c:
c.ignore('blob_name', 'snapshot')
with self.argument_context('storage container immutability-policy') as c:
c.argument('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
arg_type=get_three_state_flag())
with self.argument_context('storage container list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage container set-permission') as c:
c.ignore('signed_identifiers')
with self.argument_context('storage container lease') as c:
c.argument('container_name', container_name_type)
with self.argument_context('storage container') as c:
c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage container immutability-policy') as c:
c.argument('immutability_period_since_creation_in_days', options_list='--period')
c.argument('container_name', container_name_type)
with self.argument_context('storage container legal-hold') as c:
c.argument('container_name', container_name_type)
c.argument('tags', nargs='+',
help='Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case')
with self.argument_context('storage container policy') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.argument('container_name', container_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_container_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
for item in ['create', 'delete', 'list', 'show', 'update']:
with self.argument_context('storage container policy {}'.format(item)) as c:
c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
with self.argument_context('storage container generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_container_permissions)),
validator=get_permission_validator(t_container_permissions))
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
'accessed using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
'using this shared access signature.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command returns the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified.")
c.ignore('sas_token')
with self.argument_context('storage container lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
with self.argument_context('storage share') as c:
c.argument('share_name', share_name_type, options_list=('--name', '-n'))
for item in ['create', 'delete', 'exists', 'list', 'show', 'update']:
with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('resource_group_name', required=False)
c.argument('account_name', storage_account_type)
c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('share_quota', type=int, options_list='--quota')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs that is associated with the share. '
'This overwrites any existing metadata',
validator=validate_metadata)
c.ignore('filter', 'maxpagesize', 'skip_token')
with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
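# Illustrative management-plane share creation using the `share-rm` arguments registered above (placeholders):
#   az storage share-rm create --storage-account mystorageaccount -n myshare --quota 1024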
with self.argument_context('storage share url') as c:
c.argument('unc', action='store_true', help='Output UNC network path.')
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage share list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage share exists') as c:
c.ignore('directory_name', 'file_name')
with self.argument_context('storage share policy') as c:
from .completers import get_storage_acl_name_completion_list
t_file_svc = self.get_sdk('file#FileService')
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.argument('container_name', share_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_share_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage share delete') as c:
from .sdkutil import get_delete_file_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
help='Specify the deletion strategy when the share has snapshots.')
with self.argument_context('storage share generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_share_permissions)),
validator=get_permission_validator(t_share_permissions))
c.ignore('sas_token')
with self.argument_context('storage directory') as c:
c.argument('directory_name', directory_type, options_list=('--name', '-n'))
with self.argument_context('storage directory exists') as c:
c.ignore('file_name')
c.argument('directory_name', required=True)
with self.argument_context('storage file') as c:
c.argument('file_name', file_name_type, options_list=('--name', '-n'))
c.argument('directory_name', directory_type, required=False)
with self.argument_context('storage file copy') as c:
c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
help='Name of the destination share. The share must exist.')
with self.argument_context('storage file copy cancel') as c:
c.register_path_argument(options_list=('--destination-path', '-p'))
with self.argument_context('storage file delete') as c:
c.register_path_argument()
with self.argument_context('storage file download') as c:
c.register_path_argument()
c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
help='Path of the file to write to. The source filename will be used if not specified.',
validator=process_file_download_namespace, completer=FilesCompleter())
c.argument('path', validator=None) # validator called manually from process_file_download_namespace
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
with self.argument_context('storage file exists') as c:
c.register_path_argument()
with self.argument_context('storage file generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_path_argument()
c.register_sas_arguments()
t_file_svc = self.get_sdk('file.fileservice#FileService')
t_file_permissions = self.get_sdk('file.models#FilePermissions')
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_file_permissions)),
validator=get_permission_validator(t_file_permissions))
c.ignore('sas_token')
with self.argument_context('storage file list') as c:
from .completers import dir_path_completer
c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
completer=dir_path_completer)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage file metadata show') as c:
c.register_path_argument()
with self.argument_context('storage file metadata update') as c:
c.register_path_argument()
with self.argument_context('storage file resize') as c:
c.register_path_argument()
c.argument('content_length', options_list='--size')
with self.argument_context('storage file show') as c:
c.register_path_argument()
with self.argument_context('storage file update') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument()
c.register_content_settings_argument(t_file_content_settings, update=True)
with self.argument_context('storage file upload') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument(default_file_param='local_file_path')
c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
with self.argument_context('storage file url') as c:
c.register_path_argument()
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage file upload-batch') as c:
from ._validators import process_file_upload_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
c.extra('no_progress', progress_type)
with self.argument_context('storage file download-batch') as c:
from ._validators import process_file_download_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
with self.argument_context('storage file delete-batch') as c:
from ._validators import process_file_batch_source_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
with self.argument_context('storage file copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_path_argument(options_list=('--destination-path', '-p'))
c.register_source_uri_arguments(validator=validate_source_uri)
c.extra('file_snapshot', default=None, arg_group='Copy Source',
help='The file snapshot for the source storage account.')
with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
from ._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage cors list') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
options_list='--services', required=False)
with self.argument_context('storage cors add') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
c.argument('max_age')
c.argument('origins', nargs='+')
c.argument('methods', nargs='+',
arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
c.argument('allowed_headers', nargs='+')
c.argument('exposed_headers', nargs='+')
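# Illustrative CORS rule creation using the arguments registered above (values are examples only):
#   az storage cors add --services b --methods GET PUT --origins '*' --allowed-headers '*' --exposed-headers '*' --max-age 200 --account-name mystorageaccount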
with self.argument_context('storage cors clear') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
with self.argument_context('storage queue generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_queue_permissions)),
validator=get_permission_validator(t_queue_permissions))
c.ignore('sas_token')
c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue create') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.argument('container_name', queue_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_queue_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage message') as c:
c.argument('queue_name', queue_name_type)
c.argument('message_id', options_list='--id')
c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
with self.argument_context('storage remove') as c:
from .completers import file_path_completer
c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.extra('share_name', share_name_type, help='The file share name.')
c.extra('path', options_list=('--path', '-p'),
help='The path to the file within the file share.',
completer=file_path_completer)
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.ignore('destination')
c.ignore('service')
c.ignore('target')
with self.argument_context('storage table') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
from ._validators import table_permission_validator
from .completers import get_storage_acl_name_completion_list
c.argument('container_name', table_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the table\'s ACL.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
validator=table_permission_validator)
c.ignore('sas_token')
with self.argument_context('storage entity') as c:
c.ignore('property_resolver')
c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
c.argument('select', nargs='+', validator=validate_select,
help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
c.argument('accept', default='minimal', validator=validate_table_payload_format,
arg_type=get_enum_type(['none', 'minimal', 'full']),
help='Specifies how much metadata to include in the response payload.')
c.argument('marker', validator=validate_marker, nargs='+')
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
with self.argument_context('storage fs {}'.format(item)) as c:
c.extra('file_system_name', options_list=['--name', '-n'],
help="File system name.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs create') as c:
from .sdkutil import get_fs_access_type_names
c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
validator=validate_fs_public_access,
help="Specify whether data in the file system may be accessed publicly and the level of access.")
with self.argument_context('storage fs list') as c:
c.argument('include_metadata', arg_type=get_three_state_flag(),
help='Specify that file system metadata be returned in the response. The default value is "False".')
c.argument('name_starts_with', options_list=['--prefix'],
help='Filter the results to return only file systems whose names begin with the specified prefix.')
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
with self.argument_context('storage fs directory {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.extra('directory_path', options_list=['--name', '-n'],
help="The name of directory.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs directory create') as c:
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
with self.argument_context('storage fs directory list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('path', help="Filter the results to return only paths under the specified path.")
c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
with self.argument_context('storage fs directory move') as c:
c.argument('new_name', options_list=['--new-directory', '-d'],
help='The new directory name the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}".')
with self.argument_context('storage fs file list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('exclude_dir', action='store_true',
help='List only files in the given file system.')
c.argument('path', help='Filter the results to return only paths under the specified path.')
c.argument('num_results', type=int, default=5000,
help='Specify the maximum number of results to return. If the request does not specify num_results '
'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
c.argument('marker',
help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
'previous generator object. If specified, this generator will begin returning results from this '
'point.')
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'show', 'metadata update',
'metadata show']:
with self.argument_context('storage fs file {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
required=True)
c.extra('timeout', timeout_type)
c.argument('content', help='Content to be appended to file.')
with self.argument_context('storage fs file create') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs file download') as c:
c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
help='The local file where the file or folder will be downloaded to. The source filename will be '
'used if not specified.')
c.argument('overwrite', arg_type=get_three_state_flag(),
help="Overwrite an existing file when specified. Default value is false.")
with self.argument_context('storage fs file move') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], required=True,
help="The original file path users want to move in a file system.")
c.argument('new_name', options_list=['--new-path'],
help='The new path the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}/{file}".')
with self.argument_context('storage fs file upload') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.argument('local_path', options_list=['--source', '-s'],
help='Path of the local file to upload as the file content.')
c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
c.argument('if_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag matches the value specified.")
c.argument('if_none_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag does not match the value specified.")
c.argument('if_modified_since', arg_group='Precondition',
help="A Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('if_unmodified_since', arg_group='Precondition',
help="A Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('permissions', permissions_type)
c.argument('umask', umask_type)
for item in ['set', 'show']:
with self.argument_context('storage fs access {}'.format(item)) as c:
from ._validators import validate_access_control
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('directory_path', options_list=['-p', '--path'],
help='The path to a file or directory in the specified file system.', required=True)
c.argument('permissions', validator=validate_access_control)
c.ignore('upn')
|
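The block above is the tail of an azure-cli argument-loader module: each `argument_context(scope)` block registers option names, help text, completers, and validators for every command sharing that scope prefix. A minimal, self-contained sketch of the registration pattern follows; the class and attribute names are illustrative only and are not the real azure-cli loader internals.

```python
# Toy sketch of the argument_context registration pattern (illustrative names,
# not the actual azure-cli loader classes).
from contextlib import contextmanager


class _ArgContext:
    def __init__(self, registry, scope):
        self.registry = registry
        self.scope = scope

    def argument(self, name, **settings):
        # Record an argument definition for every command under `scope`.
        self.registry.setdefault(self.scope, {})[name] = settings


class ToyCommandLoader:
    def __init__(self):
        self.registry = {}

    @contextmanager
    def argument_context(self, scope):
        yield _ArgContext(self.registry, scope)


loader = ToyCommandLoader()
with loader.argument_context('storage fs file upload') as c:
    c.argument('local_path', options_list=['--source', '-s'],
               help='Path of the local file to upload as the file content.')

print(loader.registry['storage fs file upload']['local_path']['options_list'])
```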
47,182 |
def main():
# region Argument Parsing
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sanity checks
if data_args.dataset_name is None and data_args.train_file is None and data_args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if data_args.train_file is not None:
extension = data_args.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
if data_args.validation_file is not None:
extension = data_args.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
if training_args.output_dir is not None:
training_args.output_dir = Path(training_args.output_dir)
os.makedirs(training_args.output_dir, exist_ok=True)
if isinstance(training_args.strategy, tf.distribute.TPUStrategy) and not data_args.pad_to_max_length:
logger.warning("We are training on TPU - forcing pad_to_max_length")
data_args.pad_to_max_length = True
# endregion
# region Checkpoints
# Detecting last checkpoint.
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
config_path = training_args.output_dir / CONFIG_NAME
weights_path = training_args.output_dir / TF2_WEIGHTS_NAME
if config_path.is_file() and weights_path.is_file():
checkpoint = training_args.output_dir
logger.warning(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# region Setup logging
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# endregion
# If passed along, set the training seed now.
if training_args.seed is not None:
set_seed(training_args.seed)
# region Load datasets
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if checkpoint is not None:
config = AutoConfig.from_pretrained(checkpoint)
elif model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# endregion
# region Dataset preprocessing
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can reduce that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if(data_args.validation_file is not None):
eval_dataset = tokenized_datasets["validation"]
else:
print(f'Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation as provided in data_args')
train_indices,val_indices=train_test_split(list(range(len(train_dataset))),test_size=data_args.validation_split_percentage)
eval_dataset = train_dataset.select(val_indices)
train_dataset = train_dataset.select(train_indices)
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# endregion
with training_args.strategy.scope():
# region Prepare model
if checkpoint is not None:
model = TFAutoModelForMaskedLM.from_pretrained(checkpoint, config=config)
elif model_args.model_name_or_path:
model = TFAutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, config=config)
else:
logger.info("Training new model from scratch")
model = TFAutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# endregion
# region TF Dataset preparation
num_replicas = training_args.strategy.num_replicas_in_sync
train_generator = partial(sample_generator, train_dataset, tokenizer)
train_signature = {
feature: tf.TensorSpec(shape=(None,), dtype=tf.int64)
for feature in train_dataset.features
if feature != "special_tokens_mask"
}
train_signature["labels"] = train_signature["input_ids"]
train_signature = (train_signature, train_signature["labels"])
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
tf_train_dataset = (
tf.data.Dataset.from_generator(train_generator, output_signature=train_signature)
.with_options(options)
.batch(batch_size=num_replicas * training_args.per_device_train_batch_size, drop_remainder=True)
.repeat(int(training_args.num_train_epochs))
)
eval_generator = partial(sample_generator, eval_dataset, tokenizer)
eval_signature = {
feature: tf.TensorSpec(shape=(None,), dtype=tf.int64)
for feature in eval_dataset.features
if feature != "special_tokens_mask"
}
eval_signature["labels"] = eval_signature["input_ids"]
eval_signature = (eval_signature, eval_signature["labels"])
tf_eval_dataset = (
tf.data.Dataset.from_generator(eval_generator, output_signature=eval_signature)
.with_options(options)
.batch(batch_size=num_replicas * training_args.per_device_eval_batch_size, drop_remainder=True)
)
# endregion
# region Optimizer and loss
batches_per_epoch = len(train_dataset) // (num_replicas * training_args.per_device_train_batch_size)
# Bias and layernorm weights are automatically excluded from the decay
optimizer, lr_schedule = create_optimizer(
init_lr=training_args.learning_rate,
num_train_steps=int(training_args.num_train_epochs * batches_per_epoch),
num_warmup_steps=training_args.warmup_steps,
adam_beta1=training_args.adam_beta1,
adam_beta2=training_args.adam_beta2,
adam_epsilon=training_args.adam_epsilon,
weight_decay_rate=training_args.weight_decay,
)
def dummy_loss(y_true, y_pred):
return tf.reduce_mean(y_pred)
model.compile(optimizer=optimizer, loss={"loss": dummy_loss})
# endregion
# region Training and validation
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {training_args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}")
history = model.fit(
tf_train_dataset,
validation_data=tf_eval_dataset,
epochs=int(training_args.num_train_epochs),
steps_per_epoch=len(train_dataset) // (training_args.per_device_train_batch_size * num_replicas),
callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],
)
try:
train_perplexity = math.exp(history.history["loss"][-1])
except OverflowError:
train_perplexity = math.inf
try:
validation_perplexity = math.exp(history.history["val_loss"][-1])
except OverflowError:
validation_perplexity = math.inf
logger.warning(f" Final train loss: {history.history['loss'][-1]:.3f}")
logger.warning(f" Final train perplexity: {train_perplexity:.3f}")
logger.warning(f" Final validation loss: {history.history['val_loss'][-1]:.3f}")
logger.warning(f" Final validation perplexity: {validation_perplexity:.3f}")
# endregion
if training_args.output_dir is not None:
model.save_pretrained(training_args.output_dir)
if training_args.push_to_hub:
# You'll probably want to append some of your own metadata here!
model.push_to_hub()
|
def main():
# region Argument Parsing
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sanity checks
if data_args.dataset_name is None and data_args.train_file is None and data_args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if data_args.train_file is not None:
extension = data_args.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
if data_args.validation_file is not None:
extension = data_args.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
if training_args.output_dir is not None:
training_args.output_dir = Path(training_args.output_dir)
os.makedirs(training_args.output_dir, exist_ok=True)
if isinstance(training_args.strategy, tf.distribute.TPUStrategy) and not data_args.pad_to_max_length:
logger.warning("We are training on TPU - forcing pad_to_max_length")
data_args.pad_to_max_length = True
# endregion
# region Checkpoints
# Detecting last checkpoint.
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
config_path = training_args.output_dir / CONFIG_NAME
weights_path = training_args.output_dir / TF2_WEIGHTS_NAME
if config_path.is_file() and weights_path.is_file():
checkpoint = training_args.output_dir
logger.warning(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# region Setup logging
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# endregion
# If passed along, set the training seed now.
if training_args.seed is not None:
set_seed(training_args.seed)
# region Load datasets
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if checkpoint is not None:
config = AutoConfig.from_pretrained(checkpoint)
elif model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# endregion
# region Dataset preprocessing
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can reduce that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if(data_args.validation_file is not None):
eval_dataset = tokenized_datasets["validation"]
else:
logger.info(f'Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation as provided in data_args')
train_indices,val_indices=train_test_split(list(range(len(train_dataset))),test_size=data_args.validation_split_percentage)
eval_dataset = train_dataset.select(val_indices)
train_dataset = train_dataset.select(train_indices)
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# endregion
with training_args.strategy.scope():
# region Prepare model
if checkpoint is not None:
model = TFAutoModelForMaskedLM.from_pretrained(checkpoint, config=config)
elif model_args.model_name_or_path:
model = TFAutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, config=config)
else:
logger.info("Training new model from scratch")
model = TFAutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# endregion
# region TF Dataset preparation
num_replicas = training_args.strategy.num_replicas_in_sync
train_generator = partial(sample_generator, train_dataset, tokenizer)
train_signature = {
feature: tf.TensorSpec(shape=(None,), dtype=tf.int64)
for feature in train_dataset.features
if feature != "special_tokens_mask"
}
train_signature["labels"] = train_signature["input_ids"]
train_signature = (train_signature, train_signature["labels"])
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
tf_train_dataset = (
tf.data.Dataset.from_generator(train_generator, output_signature=train_signature)
.with_options(options)
.batch(batch_size=num_replicas * training_args.per_device_train_batch_size, drop_remainder=True)
.repeat(int(training_args.num_train_epochs))
)
eval_generator = partial(sample_generator, eval_dataset, tokenizer)
eval_signature = {
feature: tf.TensorSpec(shape=(None,), dtype=tf.int64)
for feature in eval_dataset.features
if feature != "special_tokens_mask"
}
eval_signature["labels"] = eval_signature["input_ids"]
eval_signature = (eval_signature, eval_signature["labels"])
tf_eval_dataset = (
tf.data.Dataset.from_generator(eval_generator, output_signature=eval_signature)
.with_options(options)
.batch(batch_size=num_replicas * training_args.per_device_eval_batch_size, drop_remainder=True)
)
# endregion
# region Optimizer and loss
batches_per_epoch = len(train_dataset) // (num_replicas * training_args.per_device_train_batch_size)
# Bias and layernorm weights are automatically excluded from the decay
optimizer, lr_schedule = create_optimizer(
init_lr=training_args.learning_rate,
num_train_steps=int(training_args.num_train_epochs * batches_per_epoch),
num_warmup_steps=training_args.warmup_steps,
adam_beta1=training_args.adam_beta1,
adam_beta2=training_args.adam_beta2,
adam_epsilon=training_args.adam_epsilon,
weight_decay_rate=training_args.weight_decay,
)
def dummy_loss(y_true, y_pred):
return tf.reduce_mean(y_pred)
model.compile(optimizer=optimizer, loss={"loss": dummy_loss})
# endregion
# region Training and validation
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {training_args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}")
history = model.fit(
tf_train_dataset,
validation_data=tf_eval_dataset,
epochs=int(training_args.num_train_epochs),
steps_per_epoch=len(train_dataset) // (training_args.per_device_train_batch_size * num_replicas),
callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],
)
try:
train_perplexity = math.exp(history.history["loss"][-1])
except OverflowError:
train_perplexity = math.inf
try:
validation_perplexity = math.exp(history.history["val_loss"][-1])
except OverflowError:
validation_perplexity = math.inf
logger.warning(f" Final train loss: {history.history['loss'][-1]:.3f}")
logger.warning(f" Final train perplexity: {train_perplexity:.3f}")
logger.warning(f" Final validation loss: {history.history['val_loss'][-1]:.3f}")
logger.warning(f" Final validation perplexity: {validation_perplexity:.3f}")
# endregion
if training_args.output_dir is not None:
model.save_pretrained(training_args.output_dir)
if training_args.push_to_hub:
# You'll probably want to append some of your own metadata here!
model.push_to_hub()
|
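Both training scripts above rely on `group_texts` to pack tokenized examples into fixed-length chunks. The standalone toy below reproduces just that chunking step on hand-made data so the drop-the-remainder behaviour is easy to see; the batch contents are made up for illustration.

```python
# Standalone toy reproduction of the group_texts chunking used above, assuming
# batched examples arrive as {"input_ids": [[...], [...], ...]}.
def group_texts(examples, max_seq_length=4):
    # Concatenate all texts, then split into chunks of max_seq_length,
    # dropping the remainder exactly as the training script does.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // max_seq_length) * max_seq_length
    return {
        k: [t[i:i + max_seq_length] for i in range(0, total_length, max_seq_length)]
        for k, t in concatenated.items()
    }


batch = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
print(group_texts(batch))  # {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}  (the 9 is dropped)
```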
26,782 |
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
include_object=include_object,
render_as_batch=True
)
with context.begin_transaction():
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select GET_LOCK('alembic',1800);"
)
context.run_migrations()
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select RELEASE_LOCK('alembic');"
)
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
include_object=include_object,
render_as_batch=True
)
with context.begin_transaction():
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select GET_LOCK('alembic',1800);"
)
context.run_migrations()
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info >= (5, 6):
connection.execute(
"select RELEASE_LOCK('alembic');"
)
|
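The only change between the two migration helpers above is the guard around `RELEASE_LOCK`: an exact MySQL 5.7 check becomes `server_version_info >= (5, 6)`. The short check below, run on made-up version tuples, shows how Python's element-wise tuple comparison makes the second form the broader condition.

```python
# Illustration of version gating with tuple comparison, as in the modified guard.
for version in [(5, 5, 60), (5, 6, 40), (5, 7, 33), (8, 0, 28)]:
    exact_5_7 = version[0] == 5 and version[1] == 7
    at_least_5_6 = version >= (5, 6)
    print(version, "exact 5.7:", exact_5_7, "| >= (5, 6):", at_least_5_6)
```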
32,269 |
def get_token_soap_request(user, password, instance, domain=None):
if domain is not None:
return_xml = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:xsi="http://www.w3.orecord_to_incidentrg/2001/XMLSchema-instance" ' \
' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
' <soap:Body>' + \
' <CreateDomainUserSessionFromInstance xmlns="http://archer-tech.com/webservices/">' + \
f' <userName>{user}</userName>' + \
f' <instanceName>{instance}</instanceName>' + \
f' <password>{password}</password>' + \
f' <usersDomain>{domain}</usersDomain>' + \
' </CreateDomainUserSessionFromInstance>' + \
' </soap:Body>' + \
'</soap:Envelope>'
else:
return_xml = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:xsi="http://www.w3.orecord_to_incidentrg/2001/XMLSchema-instance" ' \
' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
' <soap:Body>' + \
' <CreateUserSessionFromInstance xmlns="http://archer-tech.com/webservices/">' + \
f' <userName>{user}</userName>' + \
f' <instanceName>{instance}</instanceName>' + \
f' <password>{password}</password>' + \
' </CreateUserSessionFromInstance>' + \
' </soap:Body>' + \
'</soap:Envelope>'
return return_xml
|
def get_token_soap_request(user, password, instance, domain=None):
if domain:
return_xml = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:xsi="http://www.w3.orecord_to_incidentrg/2001/XMLSchema-instance" ' \
' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
' <soap:Body>' + \
' <CreateDomainUserSessionFromInstance xmlns="http://archer-tech.com/webservices/">' + \
f' <userName>{user}</userName>' + \
f' <instanceName>{instance}</instanceName>' + \
f' <password>{password}</password>' + \
f' <usersDomain>{domain}</usersDomain>' + \
' </CreateDomainUserSessionFromInstance>' + \
' </soap:Body>' + \
'</soap:Envelope>'
else:
return_xml = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:xsi="http://www.w3.orecord_to_incidentrg/2001/XMLSchema-instance" ' \
' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
' <soap:Body>' + \
' <CreateUserSessionFromInstance xmlns="http://archer-tech.com/webservices/">' + \
f' <userName>{user}</userName>' + \
f' <instanceName>{instance}</instanceName>' + \
f' <password>{password}</password>' + \
' </CreateUserSessionFromInstance>' + \
' </soap:Body>' + \
'</soap:Envelope>'
return return_xml
|
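`get_token_soap_request` only builds the XML envelope; something still has to POST it to the Archer web service. The sketch below is one hypothetical way to do that with `requests`; the endpoint path and the `SOAPAction` header value are assumptions and should be taken from the service's WSDL rather than from this snippet.

```python
# Hypothetical transport layer for the envelope built above.
import requests


def request_session_token(base_url, user, password, instance, domain=None):
    body = get_token_soap_request(user, password, instance, domain)
    headers = {
        "Content-Type": "text/xml; charset=utf-8",
        # Assumed action name; confirm against the service's WSDL.
        "SOAPAction": "http://archer-tech.com/webservices/CreateUserSessionFromInstance",
    }
    resp = requests.post(f"{base_url}/ws/general.asmx", data=body, headers=headers)
    resp.raise_for_status()
    return resp.text  # the session token still has to be parsed out of the returned XML
```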
57,103 |
def set_multi(
namespace: str, sub_namespace: str | None, id_value_mapping: Dict[str, Any]
) -> bool:
"""Set multiple id values at once to the cache, where the values are all
of a specific namespace type or a Redis compatible type (more details here:
https://redis.io/topics/data-types).
Args:
namespace: str. The namespace under which the values associated with the
id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for objects
that are not associated with a conceptual domain-layer entity and
therefore don't require serialization.
sub_namespace: str|None. The sub-namespace further differentiates the
values. For Explorations, Skills, Stories, Topics, and Collections,
the sub-namespace is either None or the stringified version number
of the objects. If the sub-namespace is not required, pass in None.
id_value_mapping:
dict(str, Exploration|Skill|Story|Topic|Collection|str). A dict of
{id, value} pairs to set to the cache.
Raises:
ValueError. The namespace does not exist or is not recognized.
Returns:
bool. Whether all operations complete successfully.
"""
if len(id_value_mapping) == 0:
return True
if namespace not in SERIALIZATION_FUNCTIONS:
raise ValueError('Invalid namespace: %s.' % namespace)
memory_cache_id_value_mapping = {
_get_memcache_key(namespace, sub_namespace, obj_id):
SERIALIZATION_FUNCTIONS[namespace](value) # type: ignore[operator]
for obj_id, value in id_value_mapping.items()
}
return bool(memory_cache_services.set_multi(memory_cache_id_value_mapping)) # type: ignore[attr-defined]
|
def set_multi(
namespace: str, sub_namespace: str | None, id_value_mapping: Dict[str, Any]
) -> bool:
"""Set multiple id values at once to the cache, where the values are all
of a specific namespace type or a Redis compatible type (more details here:
https://redis.io/topics/data-types).
Args:
namespace: str. The namespace under which the values associated with the
id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for objects
that are not associated with a conceptual domain-layer entity and
therefore don't require serialization.
sub_namespace: str|None. The sub-namespace further differentiates the
values. For Explorations, Skills, Stories, Topics, and Collections,
the sub-namespace is either None or the stringified version number
of the objects. If the sub-namespace is not required, pass in None.
id_value_mapping:
dict(str, Exploration|Skill|Story|Topic|Collection|str). A dict of
{id, value} pairs to set to the cache.
Raises:
ValueError. The namespace does not exist or is not recognized.
Returns:
bool. Whether all operations complete successfully.
"""
if len(id_value_mapping) == 0:
return True
if namespace not in SERIALIZATION_FUNCTIONS:
raise ValueError('Invalid namespace: %s.' % namespace)
memory_cache_id_value_mapping = {
_get_memcache_key(namespace, sub_namespace, obj_id):
SERIALIZATION_FUNCTIONS[namespace](value) # type: ignore[operator]
for obj_id, value in id_value_mapping.items()
}
return bool(memory_cache_services.set_multi(memory_cache_id_value_mapping)) # type: ignore[attr-defined]
|
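Both copies of `set_multi` follow the same pattern: build a namespaced cache key per object, serialize each value with that namespace's serializer, and push the whole mapping to the backend in one call. The toy version below re-creates that flow end to end with a plain dict standing in for Redis; the key format and helper names are illustrative, not Oppia's real implementations.

```python
# Toy end-to-end version of the namespaced multi-set pattern above.
import json

SERIALIZATION_FUNCTIONS = {"default": json.dumps}
fake_redis = {}


def _get_memcache_key(namespace, sub_namespace, obj_id):
    # Illustrative key layout: "namespace:sub_namespace:id".
    return ":".join([namespace, sub_namespace or "", obj_id])


def set_multi(namespace, sub_namespace, id_value_mapping):
    if not id_value_mapping:
        return True
    if namespace not in SERIALIZATION_FUNCTIONS:
        raise ValueError("Invalid namespace: %s." % namespace)
    serialize = SERIALIZATION_FUNCTIONS[namespace]
    fake_redis.update({
        _get_memcache_key(namespace, sub_namespace, obj_id): serialize(value)
        for obj_id, value in id_value_mapping.items()
    })
    return True


set_multi("default", None, {"exp_1": {"title": "Intro"}})
print(fake_redis)  # {'default::exp_1': '{"title": "Intro"}'}
```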
36,613 |
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments.
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone.
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
try:
return compiler(source, filename, symbol)
except SyntaxError: # Let other compile() errors propagate.
pass
# Catch syntax warnings after the first compile
# to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
with warnings.catch_warnings():
warnings.simplefilter("error")
code1 = err1 = err2 = None
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if not code1 and _is_syntax_error(err1, err2):
raise err1
else:
return None
finally:
err1 = err2 = None
|
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments.
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone.
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
try:
return compiler(source, filename, symbol)
except SyntaxError: # Let other compile() errors propagate.
pass
# Catch syntax warnings after the first compile
# to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
with warnings.catch_warnings():
warnings.simplefilter("error")
code1 = err1 = err2 = None
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if not code1 and _is_syntax_error(err1, err2):
raise err1
return None
finally:
err1 = err2 = None
|
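`_maybe_compile` is the helper behind `codeop.compile_command`, which interactive interpreters use to decide whether input is complete, incomplete, or invalid. The public API can be exercised directly:

```python
# Using the public codeop API that the helper above implements.
import codeop

print(codeop.compile_command("x = 1"))     # a code object: the statement is complete
print(codeop.compile_command("if True:"))  # None: input is incomplete, keep reading lines
try:
    codeop.compile_command("x ===")        # genuinely invalid syntax raises immediately
except SyntaxError as err:
    print("SyntaxError:", err.msg)
```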
49,951 |
def test_multiples_operation():
with Image.open("Tests/images/gbr.gbr") as im:
rect = (0, 0, 10, 10)
im.crop(rect)
im.crop(rect)
|
def test_multiples_operation():
with Image.open("Tests/images/gbr.gbr") as im:
im.load()
im.load()
|
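The test change above swaps two `crop` calls for two `load` calls: `Image.open` is lazy, and `load()` is what actually decodes the pixel data, so calling it twice exercises the repeated-load path the test is guarding. A minimal version of the same idea:

```python
# Pillow opens images lazily; load() forces decoding of the pixel data.
from PIL import Image

with Image.open("Tests/images/gbr.gbr") as im:
    im.load()   # first call decodes the GIMP brush file
    im.load()   # second call must not fail on an already-loaded image
    print(im.size, im.mode)
```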
40,078 |
def kitti_poses_and_timestamps_to_trajectory(poses_file, timestamp_file):
pose_path = file_interface.read_kitti_poses_file(poses_file)
raw_timestamps_mat = file_interface.csv_read_matrix(timestamp_file)
error_msg = ("timestamp file must have same row with KITTI poses file")
if len(raw_timestamps_mat) > 0 and len(raw_timestamps_mat[0]) != 1 and len(timestamps) != pose_path.num_poses:
raise file_interface.FileInterfaceException(error_msg)
try:
timestamps_mat = np.array(raw_timestamps_mat).astype(float)
except ValueError:
raise file_interface.FileInterfaceException(error_msg)
return PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat)
|
def kitti_poses_and_timestamps_to_trajectory(poses_file, timestamp_file):
pose_path = file_interface.read_kitti_poses_file(poses_file)
raw_timestamps_mat = file_interface.csv_read_matrix(timestamp_file)
error_msg = ("timestamp file must have one column of timestamps and same number of rows as the KITTI poses file")
if len(raw_timestamps_mat) > 0 and len(raw_timestamps_mat[0]) != 1 and len(timestamps) != pose_path.num_poses:
raise file_interface.FileInterfaceException(error_msg)
try:
timestamps_mat = np.array(raw_timestamps_mat).astype(float)
except ValueError:
raise file_interface.FileInterfaceException(error_msg)
return PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat)
|
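Both versions of the KITTI helper above intend the same invariant: the timestamp file must parse to a single column with one row per pose (the `and` chain also references an undefined `timestamps`, so the validation appears fragile as written). The self-contained snippet below shows the shape the data is expected to have after `csv_read_matrix`; the sample values are made up.

```python
# Toy illustration of the expected timestamp layout: one column, one row per pose.
import numpy as np

raw_timestamps_mat = [["0.0"], ["0.1"], ["0.2"]]   # shape a CSV reader would return
num_poses = 3

timestamps_mat = np.array(raw_timestamps_mat).astype(float)
assert timestamps_mat.shape == (num_poses, 1)
print(timestamps_mat.ravel())  # [0.  0.1 0.2]
```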
43,925 |
def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree t, :math:`E` denotes the expansion
coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overalp
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
and :math:`E_t^{ij} = 0` is :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
ra (float): position component of the the first Gaussian function
rb (float): position component of the the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t(integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
elif t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
|
def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree :math:`t`, :math:`E` denotes the expansion
coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overalp
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
and :math:`E_t^{ij} = 0` is :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
ra (float): position component of the the first Gaussian function
rb (float): position component of the the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t(integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
elif t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
|
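The docstring above fully specifies the recursion, so it can be checked with a stripped-down re-implementation that swaps autograd's `anp` for `math` and works on plain floats. Besides reproducing the documented example (E equals 1 when the two centres coincide), the snippet unrolls the recursion once to confirm the closed form E_0^{10} = -(q*r/alpha) * exp(-q*r**2).

```python
# Stripped-down re-implementation of the docstring's recursion (math.exp and
# scalar floats instead of autograd arrays), used only as a numerical check.
import math

def expansion(la, lb, ra, rb, alpha, beta, t):
    p = alpha + beta
    q = alpha * beta / p
    r = ra - rb
    if la == lb == t == 0:
        return math.exp(-q * r ** 2)       # base case E_0^{00}
    if t < 0 or t > (la + lb):
        return 0.0
    if lb == 0:                             # step down in la
        return ((1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
                - (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
                + (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1))
    return ((1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
            + (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
            + (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1))

# Docstring example: coincident centres give E = 1.
print(expansion(0, 0, 0.0, 0.0, 3.42525091, 3.42525091, 0))   # 1.0

# Unrolling the recursion once for (la, lb, t) = (1, 0, 0) gives
# E_0^{10} = -(q r / alpha) * exp(-q r^2); check it numerically.
ra, rb, alpha, beta = 0.0, 0.5, 3.42525091, 3.42525091
q, r = alpha * beta / (alpha + beta), ra - rb
print(expansion(1, 0, ra, rb, alpha, beta, 0),
      -(q * r / alpha) * math.exp(-q * r ** 2))                # the two printed values agree
```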
23,193 |
def _register_completer(name: str, func: str, pos="start", stack=None):
"""adds a new completer to xonsh
Parameters
----------
name
unique name to use in the listing (run "completer list" to see the
current completers in order)
func
the name of a completer function to use. This should be a function
of the following arguments, and should return a set of valid completions
for the given prefix. If this completer should not be used in a given
context, it should return an empty set or None.
Arguments to FUNC:
* prefix: the string to be matched
* line: a string representing the whole current line, for context
* begidx: the index at which prefix starts in line
* endidx: the index at which prefix ends in line
* ctx: the current Python environment
If the completer expands the prefix in any way, it should return a tuple
of two elements: the first should be the set of completions, and the
second should be the length of the modified prefix (for an example, see
xonsh.completers.path.complete_path).
pos
position into the list of completers at which the new
completer should be added. It can be one of the following values:
* "start" indicates that the completer should be added to the start of
the list of completers (it should be run before all others)
* "end" indicates that the completer should be added to the end of the
list of completers (it should be run after all others)
* ">KEY", where KEY is a pre-existing name, indicates that this should
be added after the completer named KEY
* "<KEY", where KEY is a pre-existing name, indicates that this should
be added before the completer named KEY
(Default value: "start")
"""
err = None
func_name = func
xsh = builtins.__xonsh__ # type: ignore
if name in xsh.completers:
err = "The name %s is already a registered completer function." % name
else:
if func_name in xsh.ctx:
func = xsh.ctx[func_name]
if not callable(func):
err = "%s is not callable" % func_name
else:
for frame_info in stack:
frame = frame_info[0]
if func_name in frame.f_locals:
func = frame.f_locals[func_name]
break
elif func_name in frame.f_globals:
func = frame.f_globals[func_name]
break
else:
err = "No such function: %s" % func_name
if err is None:
_add_one_completer(name, func, pos)
else:
return None, err + "\n", 1
|
def _register_completer(name: str, func: str, pos="start", stack=None):
"""adds a new completer to xonsh
Parameters
----------
name
unique name to use in the listing (run "completer list" to see the
current completers in order)
func
the name of a completer function to use. This should be a function
of the following arguments, and should return a set of valid completions
for the given prefix. If this completer should not be used in a given
context, it should return an empty set or None.
Arguments to FUNC:
* prefix: the string to be matched
* line: a string representing the whole current line, for context
* begidx: the index at which prefix starts in line
* endidx: the index at which prefix ends in line
* ctx: the current Python environment
If the completer expands the prefix in any way, it should return a tuple
of two elements: the first should be the set of completions, and the
second should be the length of the modified prefix (for an example, see
xonsh.completers.path.complete_path).
pos
position into the list of completers at which the new
completer should be added. It can be one of the following values:
* "start" indicates that the completer should be added to the start of
the list of completers (it should be run before all others)
* "end" indicates that the completer should be added to the end of the
list of completers (it should be run after all others)
* ">KEY", where KEY is a pre-existing name, indicates that this should
be added after the completer named KEY
* "<KEY", where KEY is a pre-existing name, indicates that this should
be added before the completer named KEY
(Default value: "start")
"""
err = None
func_name = func
xsh = builtins.__xonsh__ # type: ignore
if name in xsh.completers:
err = f"The name {name} is already a registered completer function."
else:
if func_name in xsh.ctx:
func = xsh.ctx[func_name]
if not callable(func):
err = "%s is not callable" % func_name
else:
for frame_info in stack:
frame = frame_info[0]
if func_name in frame.f_locals:
func = frame.f_locals[func_name]
break
elif func_name in frame.f_globals:
func = frame.f_globals[func_name]
break
else:
err = "No such function: %s" % func_name
if err is None:
_add_one_completer(name, func, pos)
else:
return None, err + "\n", 1
|
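The docstring above fixes the completer contract: a callable taking (prefix, line, begidx, endidx, ctx) and returning a set of completions or None. A toy completer matching that contract, as a hedged sketch (the word list is made up and not part of xonsh):

def complete_greetings(prefix, line, begidx, endidx, ctx):
    """Toy completer: offer greeting words that start with the typed prefix."""
    words = {"hello", "hey", "hiya"}
    matches = {w for w in words if w.startswith(prefix)}
    return matches or None  # an empty result means this completer does not apply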
4,624 |
def _calculate_tfce(scores_array, masker, E=0.5, H=2, dh=0.1):
"""Calculate threshold-free cluster enhancement values for scores maps.
The TFCE calculation is implemented as described in [1]_.
Parameters
----------
scores_array : :obj:`numpy.ndarray`, shape=(n_regressors, n_descriptors)
Scores (t-statistics) for a set of regressors.
masker
E : :obj:`float`, optional
Extent weight. Default is 0.5.
H : :obj:`float`, optional
Height weight. Default is 2.
dh : :obj:`float`, optional
Step size for TFCE calculation. Default is 0.1.
Returns
-------
tfce_arr : :obj:`numpy.ndarray`, shape=(n_regressors, n_descriptors)
TFCE values.
References
----------
.. [1] Smith, S. M., & Nichols, T. E. (2009).
Threshold-free cluster enhancement: addressing problems of smoothing,
threshold dependence and localisation in cluster inference.
Neuroimage, 44(1), 83-98.
"""
# Define connectivity matrix for cluster labeling
conn = ndimage.generate_binary_structure(3, 1)
scores_4d_img = unmask(scores_array.T, masker.mask_img_)
scores_4d = scores_4d_img.get_fdata()
tfce_4d = np.zeros_like(scores_4d)
for i_regressor in range(scores_4d.shape[3]):
scores_3d = scores_4d[..., i_regressor]
# Get the step right before the maximum z-statistic in the map
max_z = np.floor(np.max(scores_3d) / dh) * dh
for score_thresh in np.arange(dh, max_z + dh, dh):
# Threshold map
thresh_scores_3d = scores_3d.copy()
thresh_scores_3d[thresh_scores_3d < score_thresh] = 0
# Derive clusters
labeled_arr3d, n_clusters = ndimage.measurements.label(
thresh_scores_3d,
conn,
)
# Label each cluster with its extent
cluster_map = np.zeros(scores_3d.shape, int)
for cluster_val in range(1, n_clusters + 1):
bool_map = labeled_arr3d == cluster_val
cluster_map[bool_map] = np.sum(bool_map)
# Calculate each voxel's tfce value based on its cluster extent
# and z-value
tfce_step_values = (cluster_map**E) * (score_thresh**H)
tfce_4d[..., i_regressor] += tfce_step_values
tfce_arr = apply_mask(
nib.Nifti1Image(
tfce_4d,
masker.mask_img_.affine,
masker.mask_img_.header,
),
masker.mask_img_,
)
return tfce_arr.T
|
def _calculate_tfce(scores_array, masker, E=0.5, H=2, dh=0.1):
"""Calculate threshold-free cluster enhancement values for scores maps.
The TFCE calculation is implemented as described in [1]_.
Parameters
----------
scores_array : :obj:`numpy.ndarray`, shape=(n_regressors, n_descriptors)
Scores (t-statistics) for a set of regressors.
masker
E : :obj:`float`, optional
Extent weight. Default is 0.5.
H : :obj:`float`, optional
Height weight. Default is 2.
dh : :obj:`float`, optional
Step size for TFCE calculation. Default is 0.1.
Returns
-------
tfce_arr : :obj:`numpy.ndarray`, shape=(n_regressors, n_descriptors)
TFCE values.
References
----------
.. [1] Smith, S. M., & Nichols, T. E. (2009).
Threshold-free cluster enhancement: addressing problems of smoothing,
threshold dependence and localisation in cluster inference.
Neuroimage, 44(1), 83-98.
"""
# Define connectivity matrix for cluster labeling
conn = ndimage.generate_binary_structure(3, 1)
scores_4d_img = unmask(scores_array.T, masker.mask_img_)
scores_4d = scores_4d_img.get_fdata()
tfce_4d = np.zeros_like(scores_4d)
for i_regressor in range(scores_4d.shape[3]):
scores_3d = scores_4d[..., i_regressor]
# Get the step right before the maximum z-statistic in the map
max_z = np.floor(np.max(scores_3d) / dh) * dh
for score_thresh in np.arange(dh, max_z + dh, dh):
# Threshold map
thresh_scores_3d = scores_3d.copy()
thresh_scores_3d[thresh_scores_3d < score_thresh] = 0
# Derive clusters
labeled_arr3d, n_clusters = ndimage.measurements.label(
thresh_scores_3d,
conn,
)
# Label each cluster with its extent
cluster_map = np.zeros(scores_3d.shape, int)
for cluster_val in range(1, n_clusters + 1):
bool_map = labeled_arr3d == cluster_val
cluster_map[bool_map] = np.sum(bool_map)
# Calculate each voxel's tfce value based on its cluster extent
# and z-value
tfce_step_values = (cluster_map ** E) * (score_thresh ** H)
tfce_4d[..., i_regressor] += tfce_step_values
tfce_arr = apply_mask(
nib.Nifti1Image(
tfce_4d,
masker.mask_img_.affine,
masker.mask_img_.header,
),
masker.mask_img_,
)
return tfce_arr.T
|
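The heart of the TFCE loop is: threshold the map, label connected clusters, and accumulate extent**E * height**H at each step. A one-dimensional toy of that idea, as a hedged sketch using scipy.ndimage directly rather than the function's masker-based interface:

import numpy as np
from scipy import ndimage

scores = np.array([0.0, 1.2, 1.5, 0.1, 2.0, 2.1, 0.0])  # toy 1-D statistic map
E, H, dh = 0.5, 2.0, 0.5
tfce = np.zeros_like(scores)
for thresh in np.arange(dh, scores.max() + dh, dh):
    labels, n = ndimage.label(scores >= thresh)   # clusters surviving this threshold
    extent = np.zeros_like(scores)
    for lab in range(1, n + 1):
        extent[labels == lab] = np.sum(labels == lab)
    tfce += (extent ** E) * (thresh ** H)          # extent- and height-weighted support
print(tfce)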
49,953 |
def test_textsize(request, tmp_path):
tempname = save_font(request, tmp_path)
font = ImageFont.load(tempname)
for i in range(255):
(dx, dy) = font.getsize(chr(i))
assert dy == 20
assert dx in (0, 10)
for j in range(len(message)):
msg = message[: j + 1]
assert font.getsize(msg) == (len(msg) * 10, 20)
|
def test_textsize(request, tmp_path):
tempname = save_font(request, tmp_path)
font = ImageFont.load(tempname)
for i in range(255):
(dx, dy) = font.getsize(chr(i))
assert dy == 20
assert dx in (0, 10)
for i in range(len(message)):
msg = message[: i + 1]
assert font.getsize(msg) == (len(msg) * 10, 20)
|
36,253 |
def wishbone_marker_trajectory(
adata: AnnData, no_bins: int = 150, smoothing_factor: int = 1,
):
"""\
Plot marker trends along trajectory
Parameters
----------
adata
Annotated data matrix.
no_bins
Number of bins for calculating marker density
smoothing_factor
Parameter controlling the degree of smoothing
Returns
-------
Updates `adata` with the following fields:
`weights_wishbone` : :class:`pandas.DataFrame` (`adata.uns`)
Computed gaussian weights for points at each location
`branch_point_bin` : 'int' (`adata.uns`)
Identifies the bin with the branch point. In case of no branching,
defaults to `no_bins`
`bins_wishbone` : :class:`numpy.ndarray` (`adata.uns`)
Computed bin locations and bin memberships
`branches_wishbone`: :class:`numpy.ndarray` (`adata.uns`)
In case of branching, returns a list of branches [2, 3].
"""
# Compute bin locations and bin memberships
# Sort trajectory
trajectory = adata.obs['trajectory_wishbone'].sort_values()
bins = np.linspace(np.min(trajectory), np.max(trajectory), no_bins)
# Compute gaussian weights for points at each location
# Standard deviation estimated from Silverman's approximation
stdev = np.std(trajectory) * 1.34 * len(trajectory) ** (-1 / 5) * smoothing_factor
weights = np.exp(
-((np.tile(trajectory, [no_bins, 1]).T - bins) ** 2 / (2 * stdev ** 2))
) * (1 / (2 * np.pi * stdev ** 2) ** 0.5)
# Adjust weights if data has branches
if 'branch_wishbone' in adata.obs.keys():
# Branch of the trunk
trunk = adata.obs['branch_wishbone'][trajectory.index[0]]
adata.uns['branches_wishbone'] = np.array([2, 3])
# Counts of branch cells in each bin
branch_counts = pd.DataFrame(np.zeros([len(bins) - 1, 3]), columns=[1, 2, 3])
for j in branch_counts.columns:
branch_counts[j] = pd.Series(
[
pd.Series(
adata.obs['branch_wishbone'][
trajectory.index[
(trajectory > bins[i - 1]) & (trajectory < bins[i])
]
]
== j
).sum()
for i in range(1, len(bins))
]
)
# Frequencies
branch_counts = branch_counts.divide(branch_counts.sum(axis=1), axis=0)
# Identify the bin with the branch point by looking at the weights
weights = pd.DataFrame(weights, index=trajectory.index, columns=range(no_bins))
bp_bin = weights.columns[np.where(branch_counts[trunk] < 0.9)[0][0]] + 0
if bp_bin < 0:
bp_bin = 3
else:
bp_bin = no_bins
# return weight to adata
adata.uns['weights_wishbone'] = weights
adata.uns['branch_point_bin'] = bp_bin
adata.uns['bins_wishbone'] = bins
|
def wishbone_marker_trajectory(
adata: AnnData, no_bins: int = 150, smoothing_factor: int = 1,
):
"""\
Plot marker trends along trajectory
Parameters
----------
adata
Annotated data matrix.
no_bins
Number of bins for calculating marker density
smoothing_factor
Parameter controlling the degree of smoothing
Returns
-------
Updates `adata` with the following fields:
`weights_wishbone` : :class:`pandas.DataFrame` (`adata.uns`)
Computed gaussian weights for points at each location
`branch_point_bin` : :class:`int` (`adata.uns`)
Identifies the bin with the branch point. In case of no branching,
defaults to `no_bins`
`bins_wishbone` : :class:`numpy.ndarray` (`adata.uns`)
Computed bin locations and bin memberships
`branches_wishbone`: :class:`numpy.ndarray` (`adata.uns`)
In case of branching, returns a list of branches [2, 3].
"""
# Compute bin locations and bin memberships
# Sort trajectory
trajectory = adata.obs['trajectory_wishbone'].sort_values()
bins = np.linspace(np.min(trajectory), np.max(trajectory), no_bins)
# Compute gaussian weights for points at each location
# Standard deviation estimated from Silverman's approximation
stdev = np.std(trajectory) * 1.34 * len(trajectory) ** (-1 / 5) * smoothing_factor
weights = np.exp(
-((np.tile(trajectory, [no_bins, 1]).T - bins) ** 2 / (2 * stdev ** 2))
) * (1 / (2 * np.pi * stdev ** 2) ** 0.5)
# Adjust weights if data has branches
if 'branch_wishbone' in adata.obs.keys():
# Branch of the trunk
trunk = adata.obs['branch_wishbone'][trajectory.index[0]]
adata.uns['branches_wishbone'] = np.array([2, 3])
# Counts of branch cells in each bin
branch_counts = pd.DataFrame(np.zeros([len(bins) - 1, 3]), columns=[1, 2, 3])
for j in branch_counts.columns:
branch_counts[j] = pd.Series(
[
pd.Series(
adata.obs['branch_wishbone'][
trajectory.index[
(trajectory > bins[i - 1]) & (trajectory < bins[i])
]
]
== j
).sum()
for i in range(1, len(bins))
]
)
# Frequencies
branch_counts = branch_counts.divide(branch_counts.sum(axis=1), axis=0)
# Identify the bin with the branch point by looking at the weights
weights = pd.DataFrame(weights, index=trajectory.index, columns=range(no_bins))
bp_bin = weights.columns[np.where(branch_counts[trunk] < 0.9)[0][0]] + 0
if bp_bin < 0:
bp_bin = 3
else:
bp_bin = no_bins
# return weight to adata
adata.uns['weights_wishbone'] = weights
adata.uns['branch_point_bin'] = bp_bin
adata.uns['bins_wishbone'] = bins
|
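The weighting step above is a Gaussian kernel whose bandwidth follows a Silverman-style rule, stdev = std(trajectory) * 1.34 * n**(-1/5) * smoothing_factor. A self-contained sketch of just that kernel-weight computation on synthetic pseudotime values (NumPy assumed; the numbers are arbitrary):

import numpy as np

trajectory = np.sort(np.random.rand(200))   # synthetic pseudotime in [0, 1]
no_bins, smoothing_factor = 50, 1
bins = np.linspace(trajectory.min(), trajectory.max(), no_bins)
stdev = np.std(trajectory) * 1.34 * len(trajectory) ** (-1 / 5) * smoothing_factor
# Gaussian weight of every cell (rows) at every bin centre (columns)
weights = np.exp(-((trajectory[:, None] - bins) ** 2) / (2 * stdev ** 2)) / np.sqrt(2 * np.pi * stdev ** 2)
print(weights.shape)  # (200, 50)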
53,806 |
def anat_qc_workflow(name="anatMRIQC"):
"""
One-subject-one-session-one-run pipeline to extract the NR-IQMs from
anatomical images
.. workflow::
import os.path as op
from mriqc.workflows.anatomical import anat_qc_workflow
from mriqc.testing import mock_config
with mock_config():
wf = anat_qc_workflow()
"""
from niworkflows.anat.skullstrip import afni_wf as skullstrip_wf
dataset = config.workflow.inputs.get("T1w", []) + config.workflow.inputs.get(
"T2w", []
)
message = BUILDING_WORKFLOW.format(dataset=", ".join(dataset))
config.loggers.workflow.info(message)
# Initialize workflow
workflow = pe.Workflow(name=name)
# Define workflow, inputs and outputs
# 0. Get data
inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
inputnode.iterables = [("in_file", dataset)]
outputnode = pe.Node(niu.IdentityInterface(fields=["out_json"]), name="outputnode")
# 1. Reorient anatomical image
to_ras = pe.Node(ConformImage(check_dtype=False), name="conform")
# 2. Skull-stripping (afni)
asw = skullstrip_wf(n4_nthreads=config.nipype.omp_nthreads, unifize=False)
# 3. Head mask
hmsk = headmsk_wf()
# 4. Spatial Normalization, using ANTs
norm = spatial_normalization()
# 5. Air mask (with and without artifacts)
amw = airmsk_wf()
# 6. Brain tissue segmentation
segment = pe.Node(
fsl.FAST(segments=True, out_basename="segment"),
name="segmentation",
mem_gb=5,
)
# 7. Compute IQMs
iqmswf = compute_iqms()
# Reports
repwf = individual_reports()
# Connect all nodes
workflow.connect(
[
(inputnode, to_ras, [("in_file", "in_file")]),
(inputnode, iqmswf, [("in_file", "inputnode.in_file")]),
(inputnode, norm, [(("in_file", _get_mod), "inputnode.modality")]),
(inputnode, segment, [(("in_file", _get_imgtype), "img_type")]),
(to_ras, asw, [("out_file", "inputnode.in_file")]),
(asw, segment, [("outputnode.out_file", "in_files")]),
(asw, hmsk, [("outputnode.bias_corrected", "inputnode.in_file")]),
(segment, hmsk, [("tissue_class_map", "inputnode.in_segm")]),
(
asw,
norm,
[
("outputnode.bias_corrected", "inputnode.moving_image"),
("outputnode.out_mask", "inputnode.moving_mask"),
],
),
(
norm,
amw,
[
(
"outputnode.inverse_composite_transform",
"inputnode.inverse_composite_transform",
)
],
),
(
norm,
iqmswf,
[
(
"outputnode.inverse_composite_transform",
"inputnode.inverse_composite_transform",
)
],
),
(
norm,
repwf,
([("outputnode.out_report", "inputnode.mni_report")]),
),
(to_ras, amw, [("out_file", "inputnode.in_file")]),
(asw, amw, [("outputnode.out_mask", "inputnode.in_mask")]),
(hmsk, amw, [("outputnode.out_file", "inputnode.head_mask")]),
(to_ras, iqmswf, [("out_file", "inputnode.in_ras")]),
(
asw,
iqmswf,
[
("outputnode.bias_corrected", "inputnode.inu_corrected"),
("outputnode.bias_image", "inputnode.in_inu"),
("outputnode.out_mask", "inputnode.brainmask"),
],
),
(
amw,
iqmswf,
[
("outputnode.air_mask", "inputnode.airmask"),
("outputnode.hat_mask", "inputnode.hatmask"),
("outputnode.art_mask", "inputnode.artmask"),
("outputnode.rot_mask", "inputnode.rotmask"),
],
),
(
segment,
iqmswf,
[
("tissue_class_map", "inputnode.segmentation"),
("partial_volume_files", "inputnode.pvms"),
],
),
(hmsk, iqmswf, [("outputnode.out_file", "inputnode.headmask")]),
(to_ras, repwf, [("out_file", "inputnode.in_ras")]),
(
asw,
repwf,
[
("outputnode.bias_corrected", "inputnode.inu_corrected"),
("outputnode.out_mask", "inputnode.brainmask"),
],
),
(hmsk, repwf, [("outputnode.out_file", "inputnode.headmask")]),
(
amw,
repwf,
[
("outputnode.air_mask", "inputnode.airmask"),
("outputnode.art_mask", "inputnode.artmask"),
("outputnode.rot_mask", "inputnode.rotmask"),
],
),
(segment, repwf, [("tissue_class_map", "inputnode.segmentation")]),
(iqmswf, repwf, [("outputnode.noisefit", "inputnode.noisefit")]),
(iqmswf, repwf, [("outputnode.out_file", "inputnode.in_iqms")]),
(iqmswf, outputnode, [("outputnode.out_file", "out_json")]),
]
)
# Upload metrics
if not config.execution.no_sub:
from ..interfaces.webapi import UploadIQMs
upldwf = pe.Node(UploadIQMs(), name="UploadMetrics")
upldwf.inputs.url = config.execution.webapi_url
upldwf.inputs.strict = config.execution.upload_strict
if config.execution.webapi_port:
upldwf.inputs.port = config.execution.webapi_port
workflow.connect(
[
(iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]),
(upldwf, repwf, [("api_id", "inputnode.api_id")]),
]
)
return workflow
|
def anat_qc_workflow(name="anatMRIQC"):
"""
One-subject-one-session-one-run pipeline to extract the NR-IQMs from
anatomical images
.. workflow::
import os.path as op
from mriqc.workflows.anatomical import anat_qc_workflow
from mriqc.testing import mock_config
with mock_config():
wf = anat_qc_workflow()
"""
from niworkflows.anat.skullstrip import afni_wf as skullstrip_wf
dataset = config.workflow.inputs.get("T1w", []) + config.workflow.inputs.get(
"T2w", []
)
message = BUILDING_WORKFLOW.format(dataset=", ".join(dataset))
config.loggers.workflow.info(message)
# Initialize workflow
workflow = pe.Workflow(name=name)
# Define workflow, inputs and outputs
# 0. Get data
inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
inputnode.iterables = [("in_file", dataset)]
outputnode = pe.Node(niu.IdentityInterface(fields=["out_json"]), name="outputnode")
# 1. Reorient anatomical image
to_ras = pe.Node(ConformImage(check_dtype=False), name="conform")
# 2. Skull-stripping (afni)
asw = skullstrip_wf(n4_nthreads=config.nipype.omp_nthreads, unifize=False)
# 3. Head mask
hmsk = headmsk_wf()
# 4. Spatial Normalization, using ANTs
norm = spatial_normalization()
# 5. Air mask (with and without artifacts)
amw = airmsk_wf()
# 6. Brain tissue segmentation
segment = pe.Node(
fsl.FAST(segments=True, out_basename="segment"),
name="segmentation",
mem_gb=5,
)
# 7. Compute IQMs
iqmswf = compute_iqms()
# Reports
repwf = individual_reports()
# Connect all nodes
# fmt: off
workflow.connect([
(inputnode, to_ras, [('in_file', 'in_file')]),
(inputnode, iqmswf, [('in_file', 'inputnode.in_file')]),
(inputnode, norm, [(('in_file', _get_mod), 'inputnode.modality')]),
(inputnode, segment, [(('in_file', _get_imgtype), 'img_type')]),
(to_ras, asw, [('out_file', 'inputnode.in_file')]),
(asw, segment, [('outputnode.out_file', 'in_files')]),
(asw, hmsk, [('outputnode.bias_corrected', 'inputnode.in_file')]),
(segment, hmsk, [('tissue_class_map', 'inputnode.in_segm')]),
(asw, norm, [('outputnode.bias_corrected', 'inputnode.moving_image'),
('outputnode.out_mask', 'inputnode.moving_mask')]),
(norm, amw, [
('outputnode.inverse_composite_transform', 'inputnode.inverse_composite_transform')]),
(norm, iqmswf, [
('outputnode.inverse_composite_transform', 'inputnode.inverse_composite_transform')]),
(norm, repwf, ([
('outputnode.out_report', 'inputnode.mni_report')])),
(to_ras, amw, [('out_file', 'inputnode.in_file')]),
(asw, amw, [('outputnode.out_mask', 'inputnode.in_mask')]),
(hmsk, amw, [('outputnode.out_file', 'inputnode.head_mask')]),
(to_ras, iqmswf, [('out_file', 'inputnode.in_ras')]),
(asw, iqmswf, [('outputnode.bias_corrected', 'inputnode.inu_corrected'),
('outputnode.bias_image', 'inputnode.in_inu'),
('outputnode.out_mask', 'inputnode.brainmask')]),
(amw, iqmswf, [('outputnode.air_mask', 'inputnode.airmask'),
('outputnode.hat_mask', 'inputnode.hatmask'),
('outputnode.art_mask', 'inputnode.artmask'),
('outputnode.rot_mask', 'inputnode.rotmask')]),
(segment, iqmswf, [('tissue_class_map', 'inputnode.segmentation'),
('partial_volume_files', 'inputnode.pvms')]),
(hmsk, iqmswf, [('outputnode.out_file', 'inputnode.headmask')]),
(to_ras, repwf, [('out_file', 'inputnode.in_ras')]),
(asw, repwf, [('outputnode.bias_corrected', 'inputnode.inu_corrected'),
('outputnode.out_mask', 'inputnode.brainmask')]),
(hmsk, repwf, [('outputnode.out_file', 'inputnode.headmask')]),
(amw, repwf, [('outputnode.air_mask', 'inputnode.airmask'),
('outputnode.art_mask', 'inputnode.artmask'),
('outputnode.rot_mask', 'inputnode.rotmask')]),
(segment, repwf, [('tissue_class_map', 'inputnode.segmentation')]),
(iqmswf, repwf, [('outputnode.noisefit', 'inputnode.noisefit')]),
(iqmswf, repwf, [('outputnode.out_file', 'inputnode.in_iqms')]),
(iqmswf, outputnode, [('outputnode.out_file', 'out_json')])
])
# fmt:on
# Upload metrics
if not config.execution.no_sub:
from ..interfaces.webapi import UploadIQMs
upldwf = pe.Node(UploadIQMs(), name="UploadMetrics")
upldwf.inputs.url = config.execution.webapi_url
upldwf.inputs.strict = config.execution.upload_strict
if config.execution.webapi_port:
upldwf.inputs.port = config.execution.webapi_port
workflow.connect(
[
(iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]),
(upldwf, repwf, [("api_id", "inputnode.api_id")]),
]
)
return workflow
|
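The only change between the two versions above is the formatting of the long workflow.connect list (wrapped in fmt: off/on markers). For reference, the connect-list syntax itself looks like this minimal, hedged sketch with toy node and field names (assumes nipype is installed):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

wf = pe.Workflow(name="toy_wf")
src = pe.Node(niu.IdentityInterface(fields=["val"]), name="src")
dst = pe.Node(niu.IdentityInterface(fields=["val"]), name="dst")
# Each entry is (source_node, destination_node, [(source_field, destination_field), ...])
wf.connect([(src, dst, [("val", "val")])])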
6,965 |
def _make(
doctype=None,
name=None,
content=None,
subject=None,
sent_or_received="Sent",
sender=None,
sender_full_name=None,
recipients=None,
communication_medium="Email",
send_email=False,
print_html=None,
print_format=None,
attachments="[]",
send_me_a_copy=False,
cc=None,
bcc=None,
read_receipt=None,
print_letterhead=True,
email_template=None,
communication_type=None,
) -> Dict[str, str]:
"""Internal method to make a new communication that ignores Permission checks.
"""
sender = sender or get_formatted_email(frappe.session.user)
recipients = list_to_str(recipients) if isinstance(recipients, list) else recipients
cc = list_to_str(cc) if isinstance(cc, list) else cc
bcc = list_to_str(bcc) if isinstance(bcc, list) else bcc
comm: "Communication" = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": content,
"sender": sender,
"sender_full_name":sender_full_name,
"recipients": recipients,
"cc": cc or None,
"bcc": bcc or None,
"communication_medium": communication_medium,
"sent_or_received": sent_or_received,
"reference_doctype": doctype,
"reference_name": name,
"email_template": email_template,
"message_id":get_message_id().strip(" <>"),
"read_receipt":read_receipt,
"has_attachment": 1 if attachments else 0,
"communication_type": communication_type,
})
comm.flags.skip_add_signature = True
comm.insert(ignore_permissions=True)
# if not committed, delayed task doesn't find the communication
if attachments:
if isinstance(attachments, str):
attachments = json.loads(attachments)
add_attachments(comm.name, attachments)
if cint(send_email):
if not comm.get_outgoing_email_account():
frappe.throw(
msg=OUTGOING_EMAIL_ACCOUNT_MISSING, exc=frappe.OutgoingEmailError
)
comm.send_email(
print_html=print_html,
print_format=print_format,
send_me_a_copy=send_me_a_copy,
print_letterhead=print_letterhead,
)
emails_not_sent_to = comm.exclude_emails_list(include_sender=send_me_a_copy)
return {"name": comm.name, "emails_not_sent_to": ", ".join(emails_not_sent_to)}
|
def _make(
doctype=None,
name=None,
content=None,
subject=None,
sent_or_received="Sent",
sender=None,
sender_full_name=None,
recipients=None,
communication_medium="Email",
send_email=False,
print_html=None,
print_format=None,
attachments="[]",
send_me_a_copy=False,
cc=None,
bcc=None,
read_receipt=None,
print_letterhead=True,
email_template=None,
communication_type=None,
) -> Dict[str, str]:
"""Internal method to make a new communication that ignores Permission checks.
"""
sender = sender or get_formatted_email(frappe.session.user)
recipients = list_to_str(recipients) if isinstance(recipients, list) else recipients
cc = list_to_str(cc) if isinstance(cc, list) else cc
bcc = list_to_str(bcc) if isinstance(bcc, list) else bcc
comm: "Communication" = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": content,
"sender": sender,
"sender_full_name":sender_full_name,
"recipients": recipients,
"cc": cc or None,
"bcc": bcc or None,
"communication_medium": communication_medium,
"sent_or_received": sent_or_received,
"reference_doctype": doctype,
"reference_name": name,
"email_template": email_template,
"message_id":get_message_id().strip(" <>"),
"read_receipt":read_receipt,
"has_attachment": 1 if attachments else 0,
"communication_type": communication_type,
})
comm.flags.add_signature = True
comm.insert(ignore_permissions=True)
# if not committed, delayed task doesn't find the communication
if attachments:
if isinstance(attachments, str):
attachments = json.loads(attachments)
add_attachments(comm.name, attachments)
if cint(send_email):
if not comm.get_outgoing_email_account():
frappe.throw(
msg=OUTGOING_EMAIL_ACCOUNT_MISSING, exc=frappe.OutgoingEmailError
)
comm.send_email(
print_html=print_html,
print_format=print_format,
send_me_a_copy=send_me_a_copy,
print_letterhead=print_letterhead,
)
emails_not_sent_to = comm.exclude_emails_list(include_sender=send_me_a_copy)
return {"name": comm.name, "emails_not_sent_to": ", ".join(emails_not_sent_to)}
|
13,562 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest |w[i]|
        - `'SM'`: select eigenvalues with smallest |w[i]|
        - `'LR'`: select eigenvalues with largest Re(w[i])
        - `'SR'`: select eigenvalues with smallest Re(w[i])
        - `'LI'`: select eigenvalues with largest Im(w[i])
        - `'SI'`: select eigenvalues with smallest Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest |w[i]|
        - `'SM'`: select eigenvalues with smallest |w[i]|
        - `'LR'`: select eigenvalues with largest Re(w[i])
        - `'SR'`: select eigenvalues with smallest Re(w[i])
        - `'LI'`: select eigenvalues with largest Im(w[i])
        - `'SI'`: select eigenvalues with smallest Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k += np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
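The which options in the docstring reduce to different sort keys over the computed eigenvalues, exactly as in the if/elif chain above. A small NumPy-only illustration of that selection step (toy eigenvalues, none of the |Operator| machinery):

import numpy as np

ew = np.array([3 + 0j, -5 + 1j, 0.2 - 2j, 1 + 4j])   # toy eigenvalues
order = {
    'LM': np.argsort(-np.abs(ew)),      # largest magnitude first
    'SM': np.argsort(np.abs(ew)),       # smallest magnitude first
    'LR': np.argsort(-ew.real),         # largest real part first
    'SI': np.argsort(np.abs(ew.imag)),  # smallest |imaginary part| first
}
k = 2
print(ew[order['LM']][:k])  # the two eigenvalues of largest magnitude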
31,995 |
def create_time(given_time) -> str:
"""converts given argument time to iso format,
if received None returns None"""
if not given_time:
return given_time
datetime_time = arg_to_datetime(given_time)
if not datetime_time:
raise DemistoException("Time parameter supplied in invalid, please supply a valid argument")
return datetime_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
def create_time(given_time) -> str:
"""converts given argument time to iso format,
if received None returns None"""
if not given_time:
return given_time
datetime_time = arg_to_datetime(given_time)
if not datetime_time:
raise DemistoException("Time parameter supplied in invalid, make sure to supply a valid argument")
return datetime_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
7,027 |
def key_setup(reg, platform=None, create=True):
"""Clean any existing authentication keys and create new ones.
If create is set to false, keys will only be cleaned from
server."""
suite_srv_dir = get_suite_srv_dir(reg)
keys = {
"client_public_key": KeyInfo(
KeyType.PUBLIC,
KeyOwner.CLIENT,
suite_srv_dir=suite_srv_dir, platform=platform),
"client_private_key": KeyInfo(
KeyType.PRIVATE,
KeyOwner.CLIENT,
suite_srv_dir=suite_srv_dir),
"server_public_key": KeyInfo(
KeyType.PUBLIC,
KeyOwner.SERVER,
suite_srv_dir=suite_srv_dir),
"server_private_key": KeyInfo(
KeyType.PRIVATE,
KeyOwner.SERVER,
suite_srv_dir=suite_srv_dir)
}
remove_keys_on_server(keys)
if create:
create_server_keys(keys, suite_srv_dir)
|
def key_housekeeping(reg, platform=None, create=True):
"""Clean any existing authentication keys and create new ones.
If create is set to false, keys will only be cleaned from
server."""
suite_srv_dir = get_suite_srv_dir(reg)
keys = {
"client_public_key": KeyInfo(
KeyType.PUBLIC,
KeyOwner.CLIENT,
suite_srv_dir=suite_srv_dir, platform=platform),
"client_private_key": KeyInfo(
KeyType.PRIVATE,
KeyOwner.CLIENT,
suite_srv_dir=suite_srv_dir),
"server_public_key": KeyInfo(
KeyType.PUBLIC,
KeyOwner.SERVER,
suite_srv_dir=suite_srv_dir),
"server_private_key": KeyInfo(
KeyType.PRIVATE,
KeyOwner.SERVER,
suite_srv_dir=suite_srv_dir)
}
remove_keys_on_server(keys)
if create:
create_server_keys(keys, suite_srv_dir)
|
30,717 |
def upload_files(excluded_files, dir_path, file_path):
"""
:param excluded_files: excluded files
:param dir_path: dir path for the files
:param file_path: the path to the pcap file
:return:
"""
filenames = [] # type: ignore
# recursive call over the file system top down
for root, directories, files in os.walk(dir_path):
for f in files:
# skipping previously existing files
# adding it to the extracted pcap files list
if f not in excluded_files and isfile(os.path.join(root, f)):
filenames.append(os.path.join(root, f))
if len(filenames) == 0:
return_error('Could not find files')
else:
results = []
context = []
protocol, packet_data = find_files_protocol(file_path)
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812]
files_dic = {file_path: os.path.basename(file_path) for file_path in filenames}
for file_path, file_name in files_dic.items():
for data in packet_data:
packet_number = data.split()[0]
for packet_number in packet_data:
data = [i for i in packet_number.split()]
source_ip = data[2]
dest_ip = data[4]
with open(file_path, 'rb') as _file:
demisto.results(fileResult(file_name, _file.read()))
with open(file_path, 'rb') as _file:
data = _file.read()
md5.update(data)
sha1.update(data)
sha256.update(data)
context.append({
'FileMD5': md5.hexdigest(),
'FileSHA1': sha1.hexdigest(),
'FileSHA256': sha256.hexdigest(),
'FileName': file_name,
'FileSize': os.path.getsize(file_path),
'DetectedInProtocol': protocol,
'FileExtension': os.path.splitext(file_name)[1],
'SourceIP': source_ip,
'DestinationIP': dest_ip
})
ec = {
'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context
}
results.append(
{
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'extractedFiles': files_base_names},
'EntryContext': ec,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Pcap Extracted Files',
[{'name': file_name} for file_name in files_base_names])
})
demisto.results(results)
|
def upload_files(excluded_files, dir_path, file_path):
"""
:param excluded_files: excluded files
:param dir_path: dir path for the files
:param file_path: the path to the pcap file
:return:
"""
filenames = [] # type: ignore
# recursive call over the file system top down
for root, directories, files in os.walk(dir_path):
for f in files:
# skipping previously existing files
# adding it to the extracted pcap files list
if f not in excluded_files and isfile(os.path.join(root, f)):
filenames.append(os.path.join(root, f))
if len(filenames) == 0:
return_error('Could not find files')
else:
results = []
context = []
protocol, packet_data = find_files_protocol(file_path)
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
files_base_names = [os.path.basename(file_path) for file_path in filenames] # noqa[F812]
files_dic = {file_path: os.path.basename(file_path) for file_path in filenames}
for file_path, file_name in files_dic.items():
for data in packet_data:
packet_number = data.split()[0]
for packet_number in packet_data:
data = packet_number.split()
source_ip = data[2]
dest_ip = data[4]
with open(file_path, 'rb') as _file:
demisto.results(fileResult(file_name, _file.read()))
with open(file_path, 'rb') as _file:
data = _file.read()
md5.update(data)
sha1.update(data)
sha256.update(data)
context.append({
'FileMD5': md5.hexdigest(),
'FileSHA1': sha1.hexdigest(),
'FileSHA256': sha256.hexdigest(),
'FileName': file_name,
'FileSize': os.path.getsize(file_path),
'DetectedInProtocol': protocol,
'FileExtension': os.path.splitext(file_name)[1],
'SourceIP': source_ip,
'DestinationIP': dest_ip
})
ec = {
'PcapExtractedFiles(val.FileMD5 === obj.FileMD5)': context
}
results.append(
{
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'extractedFiles': files_base_names},
'EntryContext': ec,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Pcap Extracted Files',
[{'name': file_name} for file_name in files_base_names])
})
demisto.results(results)
|
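Both versions hash each extracted file with MD5/SHA1/SHA256 via hashlib. A self-contained sketch of that digest step for a single file, using fresh hash objects per call (the path argument is a placeholder):

import hashlib

def file_digests(path):
    """Return (md5, sha1, sha256) hex digests for one file."""
    md5, sha1, sha256 = hashlib.md5(), hashlib.sha1(), hashlib.sha256()
    with open(path, 'rb') as fh:
        data = fh.read()
    for h in (md5, sha1, sha256):
        h.update(data)
    return md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()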
29,120 |
def main(personal_access_token):
"""Updates the files corresponding to LOCAL_FECONF_PATH and
LOCAL_CONSTANTS_PATH after doing the prerequisite checks.
Args:
personal_access_token: str. The personal access token for the
GitHub id of user.
"""
# Do prerequisite checks.
common.require_cwd_to_be_oppia()
assert common.is_current_branch_a_release_branch()
common.ensure_release_scripts_folder_exists_and_is_up_to_date()
try:
python_utils.url_open(TERMS_PAGE_URL)
except Exception:
raise Exception('Terms mainpage does not exist on Github.')
try:
check_updates_to_terms_of_service(personal_access_token)
add_mailgun_api_key()
apply_changes_based_on_config(
LOCAL_FECONF_PATH, FECONF_CONFIG_PATH, FECONF_REGEX)
apply_changes_based_on_config(
LOCAL_CONSTANTS_PATH, CONSTANTS_CONFIG_PATH, CONSTANTS_REGEX)
except Exception as e:
common.run_cmd([
'git', 'checkout', '--', LOCAL_FECONF_PATH, LOCAL_CONSTANTS_PATH])
raise Exception(e)
common.ask_user_to_confirm(
'Done! Please check feconf.py and constants.ts to ensure that '
'the changes made are correct. Specifically verify that the '
'MAILGUN_API_KEY is updated correctly and other config changes '
'are corresponding to %s and %s.\n' % (
FECONF_CONFIG_PATH, CONSTANTS_CONFIG_PATH))
|
def main(personal_access_token):
"""Updates the files corresponding to LOCAL_FECONF_PATH and
LOCAL_CONSTANTS_PATH after doing the prerequisite checks.
Args:
personal_access_token: str. The personal access token for the
GitHub id of user.
"""
# Do prerequisite checks.
common.require_cwd_to_be_oppia()
assert common.is_current_branch_a_release_branch()
common.ensure_release_scripts_folder_exists_and_is_up_to_date()
try:
python_utils.url_open(TERMS_PAGE_URL)
except Exception:
raise Exception('Terms mainpage does not exist on Github.')
try:
check_updates_to_terms_of_service(personal_access_token)
add_mailgun_api_key()
apply_changes_based_on_config(
LOCAL_FECONF_PATH, FECONF_CONFIG_PATH, FECONF_REGEX)
apply_changes_based_on_config(
LOCAL_CONSTANTS_PATH, CONSTANTS_CONFIG_PATH, CONSTANTS_REGEX)
except Exception as e:
common.run_cmd([
'git', 'checkout', '--', LOCAL_FECONF_PATH, LOCAL_CONSTANTS_PATH])
raise Exception(e)
common.ask_user_to_confirm(
        'Done! Please check feconf.py and assets/constants.ts to ensure that '
'the changes made are correct. Specifically verify that the '
'MAILGUN_API_KEY is updated correctly and other config changes '
'are corresponding to %s and %s.\n' % (
FECONF_CONFIG_PATH, CONSTANTS_CONFIG_PATH))
|
55,807 |
def run(dataset: Dataset, config: TaskConfig):
avaible_task_list = ['classification', 'regression']
if config.type not in avaible_task_list:
raise ValueError('{} is not supported.'.format(config.type))
dir_path = os.path.dirname(os.path.realpath(__file__))
DOTNET_INSTALL_DIR = os.path.join(dir_path, '.dotnet')
os.environ['MODELBUILDER_AUTOML'] = 'NNI'
os.environ['DOTNET_ROOT'] = DOTNET_INSTALL_DIR
mlnet = os.path.join(DOTNET_INSTALL_DIR, 'mlnet')
save_metadata(config)
name = config.name
temp_output_folder = os.path.join(config.output_dir,name)
if not os.path.exists(temp_output_folder):
os.mkdir(temp_output_folder)
train_time_in_seconds = config.max_runtime_seconds
log_path = os.path.join(temp_output_folder, 'log.txt')
sub_command = config.type
column_num = dataset.train.X.shape[1]
columns=['column_{}'.format(i) for i in range(column_num)]
train_df = pd.DataFrame(dataset.train.X, columns=columns)
train_df['label'] = dataset.train.y
test_df = pd.DataFrame(dataset.test.X, columns=columns)
test_df['label'] = dataset.test.y
train_dataset = os.path.join(temp_output_folder, 'train.csv')
test_dataset = os.path.join(temp_output_folder, 'test.csv')
log.info("saving train to {}".format(train_dataset))
train_df.to_csv(train_dataset, index=False, header=True)
test_df.to_csv(test_dataset, index=False, header=True)
with Timer() as training:
cmd = '{} {}'.format(mlnet, sub_command)
# dataset & test dataset
cmd += ' --dataset {} --test-dataset {}'.format(train_dataset, test_dataset)
# train time
cmd += ' --train-time {}'.format(train_time_in_seconds)
# label
cmd += ' --label-col label'
# output folder & name
cmd += ' --output {} --name {}'.format(config.output_dir, name)
# log level & log file place
cmd += ' --verbosity q --log-file-path {}'.format(log_path)
run_cmd(cmd, _env_=os.environ)
train_result_json = os.path.join(temp_output_folder, '{}.mbconfig'.format(name))
if not os.path.exists(train_result_json):
raise NoResultError("MLNet failed producing any prediction.")
with open(train_result_json, 'r') as f:
json_str = f.read()
mb_config = json.loads(json_str)
model_path = mb_config['Artifact']['MLNetModelPath']
output_prediction_txt = config.output_predictions_file.replace('.csv', '.txt')
# predict
if config.type == 'classification':
predict_cmd = '{} {}'.format(mlnet, 'predict')
predict_cmd += ' --model {} --dataset {} --task-type classification'.format(model_path, test_dataset)
predict_cmd += ' > {}'.format(output_prediction_txt)
with Timer() as prediction:
run_cmd(predict_cmd)
prediction_df = pd.read_csv(output_prediction_txt, dtype={'PredictedLabel':'object'})
#rename_df = prediction_df.rename(columns={'PredictedLabel':'predictions'})
#rename_df['truth'] = dataset.test.y
#rename_df.to_csv(config.output_predictions_file)
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['PredictedLabel'].values,
truth=dataset.test.y,
probabilities=prediction_df.values[:,:-1],
probabilities_labels=list(prediction_df.columns.values[:-1]),
)
return dict(
training_duration=training.duration,
predict_duration=prediction.duration,
)
if config.type == 'regression':
predict_cmd = '{} {}'.format(mlnet, 'predict')
predict_cmd += ' --model {} --dataset {} --task-type regression'.format(model_path, test_dataset)
predict_cmd += ' > {}'.format(output_prediction_txt)
with Timer() as prediction:
run_cmd(predict_cmd)
prediction_df = pd.read_csv(output_prediction_txt)
rename_df = prediction_df.rename(columns={'Score':'predictions'})
rename_df['truth'] = dataset.test.y
rename_df.to_csv(config.output_predictions_file)
return dict(
training_duration=training.duration,
predict_duration=prediction.duration,
)
|
def run(dataset: Dataset, config: TaskConfig):
avaible_task_list = ['classification', 'regression']
if config.type not in avaible_task_list:
raise ValueError('{} is not supported.'.format(config.type))
dir_path = os.path.dirname(os.path.realpath(__file__))
DOTNET_INSTALL_DIR = os.path.join(dir_path, '.dotnet')
os.environ['MODELBUILDER_AUTOML'] = 'NNI'
os.environ['DOTNET_ROOT'] = DOTNET_INSTALL_DIR
mlnet = os.path.join(DOTNET_INSTALL_DIR, 'mlnet')
save_metadata(config)
name = config.name
# from frameworks.shared.callee import output_subdir
temp_output_folder = output_subdir(name, config)
train_time_in_seconds = config.max_runtime_seconds
log_path = os.path.join(temp_output_folder, 'log.txt')
sub_command = config.type
column_num = dataset.train.X.shape[1]
columns=['column_{}'.format(i) for i in range(column_num)]
train_df = pd.DataFrame(dataset.train.X, columns=columns)
train_df['label'] = dataset.train.y
test_df = pd.DataFrame(dataset.test.X, columns=columns)
test_df['label'] = dataset.test.y
train_dataset = os.path.join(temp_output_folder, 'train.csv')
test_dataset = os.path.join(temp_output_folder, 'test.csv')
log.info("saving train to {}".format(train_dataset))
train_df.to_csv(train_dataset, index=False, header=True)
test_df.to_csv(test_dataset, index=False, header=True)
with Timer() as training:
cmd = '{} {}'.format(mlnet, sub_command)
# dataset & test dataset
cmd += ' --dataset {} --test-dataset {}'.format(train_dataset, test_dataset)
# train time
cmd += ' --train-time {}'.format(train_time_in_seconds)
# label
cmd += ' --label-col label'
# output folder & name
cmd += ' --output {} --name {}'.format(config.output_dir, name)
# log level & log file place
cmd += ' --verbosity q --log-file-path {}'.format(log_path)
run_cmd(cmd, _env_=os.environ)
train_result_json = os.path.join(temp_output_folder, '{}.mbconfig'.format(name))
if not os.path.exists(train_result_json):
raise NoResultError("MLNet failed producing any prediction.")
with open(train_result_json, 'r') as f:
json_str = f.read()
mb_config = json.loads(json_str)
model_path = mb_config['Artifact']['MLNetModelPath']
output_prediction_txt = config.output_predictions_file.replace('.csv', '.txt')
# predict
if config.type == 'classification':
predict_cmd = '{} {}'.format(mlnet, 'predict')
predict_cmd += ' --model {} --dataset {} --task-type classification'.format(model_path, test_dataset)
predict_cmd += ' > {}'.format(output_prediction_txt)
with Timer() as prediction:
run_cmd(predict_cmd)
prediction_df = pd.read_csv(output_prediction_txt, dtype={'PredictedLabel':'object'})
#rename_df = prediction_df.rename(columns={'PredictedLabel':'predictions'})
#rename_df['truth'] = dataset.test.y
#rename_df.to_csv(config.output_predictions_file)
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['PredictedLabel'].values,
truth=dataset.test.y,
probabilities=prediction_df.values[:,:-1],
probabilities_labels=list(prediction_df.columns.values[:-1]),
)
return dict(
training_duration=training.duration,
predict_duration=prediction.duration,
)
if config.type == 'regression':
predict_cmd = '{} {}'.format(mlnet, 'predict')
predict_cmd += ' --model {} --dataset {} --task-type regression'.format(model_path, test_dataset)
predict_cmd += ' > {}'.format(output_prediction_txt)
with Timer() as prediction:
run_cmd(predict_cmd)
prediction_df = pd.read_csv(output_prediction_txt)
rename_df = prediction_df.rename(columns={'Score':'predictions'})
rename_df['truth'] = dataset.test.y
rename_df.to_csv(config.output_predictions_file)
return dict(
training_duration=training.duration,
predict_duration=prediction.duration,
)
|
56,236 |
def build_arg():
parser = ArgumentParser(add_help=False)
in_args = parser.add_argument_group('Options')
in_args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Help with the script.')
in_args.add_argument("-m", "--model", help="Required. Path to .xml file with pre-trained model.",
required=True, type=Path)
in_args.add_argument("-d", "--device",
help="Optional. Specify target device for infer: CPU, GPU, FPGA, HDDL or MYRIAD. "
"Default: CPU",
default="CPU", type=str)
in_args.add_argument('-i', "--input", required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images or anything that cv2.VideoCapture can process.')
in_args.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
in_args.add_argument('-o', '--output_video', required=False,
help='Optional. Path to an output video file.')
in_args.add_argument("--no_show", help="Optional. Disable display of results on screen.",
action='store_true', default=False)
in_args.add_argument("-v", "--verbose", help="Optional. Enable display of processing logs on screen.",
action='store_true', default=False)
in_args.add_argument("-u", "--utilization_monitors", default="", type=str,
help="Optional. List of monitors to show initially.")
return parser
|
def build_arg():
parser = ArgumentParser(add_help=False)
in_args = parser.add_argument_group('Options')
in_args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Help with the script.')
in_args.add_argument("-m", "--model", help="Required. Path to .xml file with pre-trained model.",
required=True, type=Path)
in_args.add_argument("-d", "--device",
help="Optional. Specify target device for infer: CPU, GPU, FPGA, HDDL or MYRIAD. "
"Default: CPU",
default="CPU", type=str)
in_args.add_argument('-i', "--input", required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images or anything that cv2.VideoCapture can process.')
in_args.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
in_args.add_argument('-o', '--output', required=False,
help='Optional. Name of output to save.')
in_args.add_argument("--no_show", help="Optional. Disable display of results on screen.",
action='store_true', default=False)
in_args.add_argument("-v", "--verbose", help="Optional. Enable display of processing logs on screen.",
action='store_true', default=False)
in_args.add_argument("-u", "--utilization_monitors", default="", type=str,
help="Optional. List of monitors to show initially.")
return parser
|
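Either parser builds the same argparse interface apart from the renamed output option. A hedged usage sketch, assuming the function above and its ArgumentParser/Path imports are in scope (the file names are placeholders):

parser = build_arg()
args = parser.parse_args(['-m', 'model.xml', '-i', 'input.mp4', '--no_show'])
print(args.model, args.input, args.no_show)  # model.xml input.mp4 True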
36,648 |
def main(opcode_py, outfile='Include/opcode.h'):
opcode = {}
if hasattr(tokenize, 'open'):
fp = tokenize.open(opcode_py) # Python 3.2+
else:
fp = open(opcode_py) # Python 2.7
with fp:
code = fp.read()
exec(code, opcode)
opmap = opcode['opmap']
opname = opcode['opname']
hasconst = opcode['hasconst']
hasjrel = opcode['hasjrel']
hasjabs = opcode['hasjabs']
used = [ False ] * 256
next_op = 1
for name, op in opmap.items():
used[op] = True
with open(outfile, 'w') as fobj:
fobj.write(header)
for name in opname:
if name in opmap:
fobj.write(DEFINE.format(name, opmap[name]))
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
fobj.write(DEFINE.format("HAVE_ARGUMENT", opcode["HAVE_ARGUMENT"]))
for name in opcode['_specialized_instructions']:
while used[next_op]:
next_op += 1
fobj.write(DEFINE.format(name, next_op))
used[next_op] = True
fobj.write(DEFINE.format('DO_TRACING', 255))
fobj.write("\nextern const uint8_t _PyOpcode_InlineCacheEntries[256];\n")
fobj.write("#ifdef NEED_OPCODE_TABLES\n")
write_int_array_from_ops("_PyOpcode_RelativeJump", opcode['hasjrel'], fobj)
write_int_array_from_ops("_PyOpcode_Jump", opcode['hasjrel'] + opcode['hasjabs'], fobj)
fobj.write("\nconst uint8_t _PyOpcode_InlineCacheEntries[256] = {\n")
for i, entries in enumerate(opcode["_inline_cache_entries"]):
if entries:
fobj.write(f" [{opname[i]}] = {entries},\n")
fobj.write("};\n")
fobj.write("#endif /* OPCODE_TABLES */\n")
fobj.write("\n")
fobj.write("#define HAS_CONST(op) (false\\")
for op in hasconst:
fobj.write(f"\n || ((op) == {op}) \\")
fobj.write("\n )\n")
fobj.write("\n")
for i, (op, _) in enumerate(opcode["_nb_ops"]):
fobj.write(DEFINE.format(op, i))
fobj.write(footer)
print(f"{outfile} regenerated from {opcode_py}")
|
def main(opcode_py, outfile='Include/opcode.h'):
opcode = {}
if hasattr(tokenize, 'open'):
fp = tokenize.open(opcode_py) # Python 3.2+
else:
fp = open(opcode_py) # Python 2.7
with fp:
code = fp.read()
exec(code, opcode)
opmap = opcode['opmap']
opname = opcode['opname']
hasconst = opcode['hasconst']
hasjrel = opcode['hasjrel']
hasjabs = opcode['hasjabs']
used = [ False ] * 256
next_op = 1
for name, op in opmap.items():
used[op] = True
with open(outfile, 'w') as fobj:
fobj.write(header)
for name in opname:
if name in opmap:
fobj.write(DEFINE.format(name, opmap[name]))
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
fobj.write(DEFINE.format("HAVE_ARGUMENT", opcode["HAVE_ARGUMENT"]))
for name in opcode['_specialized_instructions']:
while used[next_op]:
next_op += 1
fobj.write(DEFINE.format(name, next_op))
used[next_op] = True
fobj.write(DEFINE.format('DO_TRACING', 255))
fobj.write("\nextern const uint8_t _PyOpcode_InlineCacheEntries[256];\n")
fobj.write("\n#ifdef NEED_OPCODE_TABLES\n")
write_int_array_from_ops("_PyOpcode_RelativeJump", opcode['hasjrel'], fobj)
write_int_array_from_ops("_PyOpcode_Jump", opcode['hasjrel'] + opcode['hasjabs'], fobj)
fobj.write("\nconst uint8_t _PyOpcode_InlineCacheEntries[256] = {\n")
for i, entries in enumerate(opcode["_inline_cache_entries"]):
if entries:
fobj.write(f" [{opname[i]}] = {entries},\n")
fobj.write("};\n")
fobj.write("#endif /* OPCODE_TABLES */\n")
fobj.write("\n")
fobj.write("#define HAS_CONST(op) (false\\")
for op in hasconst:
fobj.write(f"\n || ((op) == {op}) \\")
fobj.write("\n )\n")
fobj.write("\n")
for i, (op, _) in enumerate(opcode["_nb_ops"]):
fobj.write(DEFINE.format(op, i))
fobj.write(footer)
print(f"{outfile} regenerated from {opcode_py}")
|
6,944 |
def get_notification_message(doc):
owner_name = get_fullname(doc.owner)
points = doc.points
title = get_title(doc.reference_doctype, doc.reference_name)
if doc.type == 'Auto':
owner_name = _(frappe.bold('You'))
if points == 1:
message = _('{0} gained {1} point for {2} {3}')
else:
message = _('{0} gained {1} points for {2} {3}')
message = message.format(owner_name, frappe.bold(points), _(doc.rule), get_title_html(title))
elif doc.type == 'Appreciation':
if points == 1:
message = _('{0} appreciated your work on {1} with {2} point')
else:
message = _('{0} appreciated your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Criticism':
if points == 1:
message = _('{0} criticized your work on {1} with {2} point')
else:
message = _('{0} criticized your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Revert':
if points == 1:
message = _('{0} reverted your point on {1}')
else:
message = _('{0} reverted your points on {1}')
message = message.format(frappe.bold(owner_name), get_title_html(title))
return message
|
def get_notification_message(doc):
owner_name = get_fullname(doc.owner)
points = doc.points
title = get_title(doc.reference_doctype, doc.reference_name)
if doc.type == 'Auto':
owner_name = frappe.bold(_('You'))
if points == 1:
message = _('{0} gained {1} point for {2} {3}')
else:
message = _('{0} gained {1} points for {2} {3}')
message = message.format(owner_name, frappe.bold(points), _(doc.rule), get_title_html(title))
elif doc.type == 'Appreciation':
if points == 1:
message = _('{0} appreciated your work on {1} with {2} point')
else:
message = _('{0} appreciated your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Criticism':
if points == 1:
message = _('{0} criticized your work on {1} with {2} point')
else:
message = _('{0} criticized your work on {1} with {2} points')
message = message.format(frappe.bold(owner_name), get_title_html(title), frappe.bold(points))
elif doc.type == 'Revert':
if points == 1:
message = _('{0} reverted your point on {1}')
else:
message = _('{0} reverted your points on {1}')
message = message.format(frappe.bold(owner_name), get_title_html(title))
return message
|
38,939 |
def test_bytesize_to():
class Model(BaseModel):
size: ByteSize
m = Model(size='1GiB')
assert pytest.approx(m.size.to('MiB')) == 1024
assert pytest.approx(m.size.to('MB')) == 1073.741824
assert pytest.approx(m.size.to('TiB')) == 0.0009765625
|
def test_bytesize_to():
class Model(BaseModel):
size: ByteSize
m = Model(size='1GiB')
assert pytest.approx(m.size.to('MiB')) == 1024
assert m.size.to('MB') == pytest.approx(1073.741824)
assert pytest.approx(m.size.to('TiB')) == 0.0009765625
|
33,730 |
def _import_aws(provider_config: Dict[str, Any]) -> Any:
from ray.autoscaler._private.aws.node_provider import AWSNodeProvider
return AWSNodeProvider
|
def _import_aws(provider_config: Dict[str, Any]) -> "NodeProvider":
from ray.autoscaler._private.aws.node_provider import AWSNodeProvider
return AWSNodeProvider
|
35,526 |
def calc_pos_fix_gauss_newton(measurements, posfix_functions, x0=None, signal='C1C', min_measurements=6):
'''
Calculates gps fix using gauss newton method
To solve the problem a minimal of 4 measurements are required.
If Glonass is included 5 are required to solve for the additional free variable.
returns:
0 -> list with positions
'''
if x0 is None:
x0 = [0, 0, 0, 0, 0]
n = len(measurements)
if n < min_measurements:
return []
Fx_pos = pr_residual(measurements, posfix_functions, signal=signal)
x = gauss_newton(Fx_pos, x0)
residual = Fx_pos(x, weight=1.0)
print(residual[0])
return x, residual[0]
|
def calc_pos_fix_gauss_newton(measurements, posfix_functions, x0=None, signal='C1C', min_measurements=6):
'''
Calculates gps fix using gauss newton method
To solve the problem a minimal of 4 measurements are required.
If Glonass is included 5 are required to solve for the additional free variable.
returns:
0 -> list with positions
'''
if x0 is None:
x0 = [0, 0, 0, 0, 0]
n = len(measurements)
if n < min_measurements:
return []
Fx_pos = pr_residual(measurements, posfix_functions, signal=signal)
x = gauss_newton(Fx_pos, x0)
residual, _ = Fx_pos(x, weight=1.0)
print(residual[0])
return x, residual[0]
|
43,649 |
def xy_mixer(graph):
r""""Creates the generalized SWAP/XY mixer outlined in `this paper <https://arxiv.org/abs/1709.03489>`__, defined
as:
.. math:: H_M \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} X_i X_j \ + \ Y_i Y_j,
for some graph :math:`G`. :math:`X_i` and :math:`Y_i` denote the Pauli-X and Pauli-Y on the :math:`i`-th
qubit respectively.
Args:
graph (Iterable or networkx.Graph) A graph defining the pairs of wires on which each term of the Hamiltonian acts.
"""
##############
# Input checks
if isinstance(graph, networkx.Graph):
graph = graph.edges
elif isinstance(graph, Iterable):
check_iterable_graph(graph)
else:
raise ValueError(
"Inputted graph must be a networkx.Graph object or Iterable, got {}".format(
type(graph).__name__
)
)
##############
coeffs = 2 * [0.5 for i in graph]
obs = []
for e in graph:
obs.append(qml.PauliX(Wires(e[0])) @ qml.PauliX(Wires(e[1])))
obs.append(qml.PauliY(Wires(e[0])) @ qml.PauliY(Wires(e[1])))
return qml.Hamiltonian(coeffs, obs)
|
def xy_mixer(graph):
r""""Creates the generalized SWAP/XY mixer outlined in `this paper <https://arxiv.org/abs/1709.03489>`__, defined
as:
.. math:: H_M \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} X_i X_j \ + \ Y_i Y_j,
for some graph :math:`G`. :math:`X_i` and :math:`Y_i` denote the Pauli-X and Pauli-Y on the :math:`i`-th
qubit respectively.
Args:
graph (Iterable or networkx.Graph) A graph defining the pairs of wires on which each term of the Hamiltonian acts.
"""
##############
# Input checks
if isinstance(graph, networkx.Graph):
graph = graph.edges
elif isinstance(graph, Iterable):
check_iterable_graph(graph)
else:
raise ValueError(
"Input graph must be a networkx.Graph object or Iterable, got {}".format(
type(graph).__name__
)
)
##############
coeffs = 2 * [0.5 for i in graph]
obs = []
for e in graph:
obs.append(qml.PauliX(Wires(e[0])) @ qml.PauliX(Wires(e[1])))
obs.append(qml.PauliY(Wires(e[0])) @ qml.PauliY(Wires(e[1])))
return qml.Hamiltonian(coeffs, obs)
|
34,146 |
def get_valid_config(config: Text, mandatory_keys: List[Text]) -> Text:
config_path = get_validated_path(config, "config", FALLBACK_CONFIG_PATH)
missing_keys = missing_config_keys(config_path, mandatory_keys)
if missing_keys:
print_warning(
"Invalid config found '{}'. Missing mandatory parameters: "
"{}. Enrich config with fallback configuration from '{}'."
"".format(config, ", ".join(missing_keys), FALLBACK_CONFIG_PATH)
)
enrich_config(config_path, missing_keys, FALLBACK_CONFIG_PATH)
return config_path
|
def get_valid_config(config: Text, mandatory_keys: List[Text]) -> Text:
config_path = get_validated_path(config, "config", FALLBACK_CONFIG_PATH)
missing_keys = missing_config_keys(config_path, mandatory_keys)
if missing_keys:
print_warning(
"Configuration file '{}' is missing mandatory parameters: "
"{}. Enrich config with fallback configuration from '{}'."
"".format(config, ", ".join(missing_keys), FALLBACK_CONFIG_PATH)
)
enrich_config(config_path, missing_keys, FALLBACK_CONFIG_PATH)
return config_path
|
18,214 |
def get_specs(force=False, use_arch=False, names=None):
"""
Get spec.yaml's for build caches available on mirror
"""
global _cached_specs
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = ('([^-]*-[^-]*-[^-]*)')
if use_arch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
if not names:
names=['']
names_or_hashes = [name.replace('/', '') for name in names]
names_pattern = '|'.join(names_or_hashes)
regex_pattern = '%s(.*)(%s)(.*)(spec.yaml$)' % (arch_pattern,
names_pattern)
name_re = re.compile(regex_pattern)
if _cached_specs:
tty.debug("Using previously-retrieved specs")
return _cached_specs
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = name_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = name_re.search(link)
if m:
urls.add(link)
_cached_specs = []
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.append(spec)
return _cached_specs
|
def get_specs(force=False, use_arch=False, names=None):
"""
Get spec.yaml's for build caches available on mirror
"""
global _cached_specs
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = ('([^-]*-[^-]*-[^-]*)')
if use_arch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
if names is not None:
names=['']
names_or_hashes = [name.replace('/', '') for name in names]
names_pattern = '|'.join(names_or_hashes)
regex_pattern = '%s(.*)(%s)(.*)(spec.yaml$)' % (arch_pattern,
names_pattern)
name_re = re.compile(regex_pattern)
if _cached_specs:
tty.debug("Using previously-retrieved specs")
return _cached_specs
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = name_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = name_re.search(link)
if m:
urls.add(link)
_cached_specs = []
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.append(spec)
return _cached_specs
|
8,184 |
def test_no_sunpy_frame():
r = RotatedSunFrame(base=HeliocentricMeanEcliptic(obstime='2001-01-01'))
assert isinstance(r, RotatedSunFrame)
|
def test_non_sunpy_frame():
r = RotatedSunFrame(base=HeliocentricMeanEcliptic(obstime='2001-01-01'))
assert isinstance(r, RotatedSunFrame)
|