id (int64) | original (string) | modified (string) |
---|---|---|
58,179 |
def run_command():
args = demisto.args()
host_ids = argToList(args.get('host_ids'))
command_type = args.get('command_type')
full_command = args.get('full_command')
scope = args.get('scope', 'read')
target = args.get('target', 'batch')
offline = args.get('queue_offline')
output = []
if target == 'batch':
batch_id = init_rtr_batch_session(host_ids, offline)
timer = Timer(300, batch_refresh_session, kwargs={'batch_id': batch_id})
timer.start()
try:
if scope == 'read':
response = run_batch_read_cmd(batch_id, command_type, full_command)
elif scope == 'write':
response = run_batch_write_cmd(batch_id, command_type, full_command)
else: # scope = admin
response = run_batch_admin_cmd(batch_id, command_type, full_command)
finally:
timer.cancel()
resources: dict = response.get('combined', {}).get('resources', {})
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': resource.get('aid'),
'SessionID': resource.get('session_id'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'Command': full_command
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_batch = {
'CrowdStrike': {
'Command': output
}
}
return create_entry_object(contents=response, ec=entry_context_batch, hr=human_readable)
else: # target = 'single'
responses = []
for host_id in host_ids:
if scope == 'read':
response1 = run_single_read_cmd(host_id, command_type, full_command)
elif scope == 'write':
response1 = run_single_write_cmd(host_id, command_type, full_command)
else: # scope = admin
response1 = run_single_admin_cmd(host_id, command_type, full_command)
responses.append(response1)
for resource in response1.get('resources', []):
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': host_id,
'TaskID': resource.get('cloud_request_id'),
'SessionID': resource.get('session_id'),
'BaseCommand': command_type,
'Command': full_command,
'Complete': False,
'NextSequenceID': 0
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_single = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
}
return create_entry_object(contents=responses, ec=entry_context_single, hr=human_readable)
|
def run_command():
args = demisto.args()
host_ids = argToList(args.get('host_ids'))
command_type = args.get('command_type')
full_command = args.get('full_command')
scope = args.get('scope', 'read')
target = args.get('target', 'batch')
offline = argToBoolean(args.get('queue_offline', False))
output = []
if target == 'batch':
batch_id = init_rtr_batch_session(host_ids, offline)
timer = Timer(300, batch_refresh_session, kwargs={'batch_id': batch_id})
timer.start()
try:
if scope == 'read':
response = run_batch_read_cmd(batch_id, command_type, full_command)
elif scope == 'write':
response = run_batch_write_cmd(batch_id, command_type, full_command)
else: # scope = admin
response = run_batch_admin_cmd(batch_id, command_type, full_command)
finally:
timer.cancel()
resources: dict = response.get('combined', {}).get('resources', {})
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': resource.get('aid'),
'SessionID': resource.get('session_id'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'Command': full_command
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_batch = {
'CrowdStrike': {
'Command': output
}
}
return create_entry_object(contents=response, ec=entry_context_batch, hr=human_readable)
else: # target = 'single'
responses = []
for host_id in host_ids:
if scope == 'read':
response1 = run_single_read_cmd(host_id, command_type, full_command)
elif scope == 'write':
response1 = run_single_write_cmd(host_id, command_type, full_command)
else: # scope = admin
response1 = run_single_admin_cmd(host_id, command_type, full_command)
responses.append(response1)
for resource in response1.get('resources', []):
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': host_id,
'TaskID': resource.get('cloud_request_id'),
'SessionID': resource.get('session_id'),
'BaseCommand': command_type,
'Command': full_command,
'Complete': False,
'NextSequenceID': 0
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_single = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
}
return create_entry_object(contents=responses, ec=entry_context_single, hr=human_readable)
|
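The only functional change in this pair is wrapping `queue_offline` in `argToBoolean`. A minimal sketch of why that matters, using a hypothetical stand-in for the XSOAR helper (demisto arguments arrive as strings, so the raw value 'false' would otherwise be truthy):

def arg_to_boolean(value, default=False):
    # Hypothetical stand-in for demisto's argToBoolean helper.
    if value is None:
        return default
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', 'yes', '1')

assert bool('false') is True              # the pitfall the change avoids
assert arg_to_boolean('false') is False
assert arg_to_boolean(None) is False      # matches the new default of False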
38,875 |
def multi_device(test_method):
"""
Decorator that provides an argument `device` of type `str` to a test function.
If you have a CUDA capable GPU available, device will be "cuda:0", other the device will
be "cpu".
!!! Note
If you have a CUDA capable GPU available, but you want to run the test using CPU only,
        just set the environment variable "CUDA_VISIBLE_DEVICES=''" before running pytest.
"""
return pytest.mark.parametrize("device", _available_devices)(pytest.mark.gpu(test_method))
|
def multi_device(test_method):
"""
Decorator that provides an argument `device` of type `str` to a test function.
If you have a CUDA capable GPU available, device will be "cuda:0", otherwise the device will
be "cpu".
!!! Note
If you have a CUDA capable GPU available, but you want to run the test using CPU only,
        just set the environment variable "CUDA_VISIBLE_DEVICES=''" before running pytest.
"""
return pytest.mark.parametrize("device", _available_devices)(pytest.mark.gpu(test_method))
|
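An illustrative use of the decorator in a test module. `_available_devices` and the `gpu` marker are assumed to come from the surrounding test utilities, and `torch` is only used here to show the injected `device` string being consumed:

@multi_device
def test_tensor_addition(device: str):
    import torch
    t = torch.ones(2, device=device)
    assert (t + t).sum().item() == 4.0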
23,054 |
def isnonzero(a):
if a.dtype.kind in {"U", "S"}:
# NumPy treats all-whitespace strings as falsy in some places (where).
# but not in `.astype(bool)`. To match the behavior of numpy at least until
# 1.19, we is _isnonzero_vec. When NumPy changes behavior, we should just
# use the try block below.
# https://github.com/numpy/numpy/issues/9875
return a.map_blocks(_isnonzero_vec, dtype=bool)
try:
np.zeros(tuple(), dtype=a.dtype).astype(bool)
except ValueError:
######################################################
# Handle special cases where conversion to bool does #
# not work correctly. #
# #
# xref: https://github.com/numpy/numpy/issues/9479 #
######################################################
return a.map_blocks(_isnonzero_vec, dtype=bool)
else:
return a.astype(bool)
|
def isnonzero(a):
if a.dtype.kind in {"U", "S"}:
# NumPy treats all-whitespace strings as falsy in some places (where).
# but not in `.astype(bool)`. To match the behavior of numpy at least until
# 1.19, we use `_isnonzero_vec`. When NumPy changes behavior, we should just
# use the try block below.
# https://github.com/numpy/numpy/issues/9875
return a.map_blocks(_isnonzero_vec, dtype=bool)
try:
np.zeros(tuple(), dtype=a.dtype).astype(bool)
except ValueError:
######################################################
# Handle special cases where conversion to bool does #
# not work correctly. #
# #
# xref: https://github.com/numpy/numpy/issues/9479 #
######################################################
return a.map_blocks(_isnonzero_vec, dtype=bool)
else:
return a.astype(bool)
|
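A small probe of the NumPy behaviour the comment above refers to. Depending on the NumPy version, the two results may disagree for the all-whitespace element, which is why string dtypes are routed through `_isnonzero_vec`:

import numpy as np

a = np.array(["", " ", "x"])
print(a.astype(bool))              # elementwise truthiness via astype
print(np.where(a, True, False))    # truthiness as seen by np.where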
7,489 |
def fit_wcs_from_points(xy, world_coords, proj_point='center',
projection='TAN', sip_distortion=True, degree=4):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS. Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
    The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
------
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- All output will be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
objects passed in for `world_coords` and `proj_point`, the frame for
`world_coords` will override.
- If `sip_distortion` is False, `degree` will be ignored.
Parameters
----------
xy : tuple of two `numpy.ndarray`
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
Skycoord object with world coordinates.
proj_point : 'center' or ~astropy.coordinates.SkyCoord`
Defaults to 'center', in which the geometric center of input world
coordinates will be used as the projection point. To specify an exact
point for the projection, a Skycoord object with a coordinate pair can
be passed in. For consistency, the units and frame of these coordinates
will be transformed to match 'world_coords' if they don't.
projection : str or `~astropy.wcs.WCS`
Three letter projection code, of any of standard projections defined
in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_distortion : bool
If True, will fit SIP of degree `degree` to points to account for
geometric distortion. If False, only linear terms are fit. Defaults to
True.
degree : int
Degree of polynomial to fit. Only used if `sip_distortion` is True.
Defaults to 4.
"""
from scipy.optimize import least_squares
from .wcs import Sip
import copy
xp, yp = xy
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
# verify input
if (proj_point != 'center') and (type(proj_point) != type(world_coords)):
raise ValueError("proj_point must be set to 'center', or an" +
"`~astropy.coordinates.SkyCoord` object with " +
"a pair of points.")
if proj_point != 'center':
assert proj_point.size == 1
proj_codes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError("Must specify valid projection code from list of "
+ "supported types: ", ', '.join(proj_codes))
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame,
projection=projection)
else: #if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1., 1.) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__('pc')
if type(sip_distortion) != bool:
raise ValueError("sip_distortion must be set to True or False.")
if (sip_distortion is True) & (type(degree) != int):
raise ValueError("If sip_distorion is True, an integer value for " +
"the polynomial order must be provided.")
# set pixel_shape to span of input points
wcs.pixel_shape = (max(xp)-min(xp), max(yp)-min(yp))
# determine CRVAL from input
close = lambda l, p: p[np.where(np.abs(l) == min(np.abs(l)))[0][0]]
if str(proj_point) == 'center': # use center of input points
wcs.wcs.crval = ((max(lon)+min(lon))/2., (max(lat)+min(lat))/2.)
wcs.wcs.crpix = ((max(xp)+min(xp))/2., (max(yp)+min(yp))/2.)
elif proj_point is not None: # convert units, initial guess for crpix
proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (close(lon-wcs.wcs.crval[0], xp),
close(lon-wcs.wcs.crval[1], yp))
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
p0 = np.concatenate([[1., 0., 0., 1], wcs.wcs.crpix.flatten()])
fit = least_squares(_linear_wcs_fit, p0,
args=(lon, lat, xp, yp, wcs))
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_distortion:
if '-SIP' not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + '-SIP' for x in wcs.wcs.ctype]
coef_names = ['{0}_{1}'.format(i, j) for i in range(degree+1)
for j in range(degree+1) if (i+j) < (degree+1) and
(i+j) > 1]
p0 = np.concatenate((np.array(wcs.wcs.crpix), wcs.wcs.cd.flatten(),
np.zeros(2*len(coef_names))))
fit = least_squares(_sip_fit, p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names))
coef_fit = (list(fit.x[6:6+len(coef_names)]),
list(fit.x[6+len(coef_names):]))
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree+1, degree+1))
b_vals = np.zeros((degree+1, degree+1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(a_vals, b_vals, np.zeros((degree+1, degree+1)),
np.zeros((degree+1, degree+1)), wcs.wcs.crpix)
return wcs
|
def fit_wcs_from_points(xy, world_coords, proj_point='center',
projection='TAN', sip_distortion=True, degree=4):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS. Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
    The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
------
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- All output will be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
objects passed in for `world_coords` and `proj_point`, the frame for
`world_coords` will override as the frame for the output WCS.
- If `sip_distortion` is False, `degree` will be ignored.
Parameters
----------
xy : tuple of two `numpy.ndarray`
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
Skycoord object with world coordinates.
proj_point : 'center' or ~astropy.coordinates.SkyCoord`
Defaults to 'center', in which the geometric center of input world
coordinates will be used as the projection point. To specify an exact
point for the projection, a Skycoord object with a coordinate pair can
be passed in. For consistency, the units and frame of these coordinates
will be transformed to match 'world_coords' if they don't.
projection : str or `~astropy.wcs.WCS`
Three letter projection code, of any of standard projections defined
in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_distortion : bool
If True, will fit SIP of degree `degree` to points to account for
geometric distortion. If False, only linear terms are fit. Defaults to
True.
degree : int
Degree of polynomial to fit. Only used if `sip_distortion` is True.
Defaults to 4.
"""
from scipy.optimize import least_squares
from .wcs import Sip
import copy
xp, yp = xy
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
# verify input
if (proj_point != 'center') and (type(proj_point) != type(world_coords)):
raise ValueError("proj_point must be set to 'center', or an" +
"`~astropy.coordinates.SkyCoord` object with " +
"a pair of points.")
if proj_point != 'center':
assert proj_point.size == 1
proj_codes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError("Must specify valid projection code from list of "
+ "supported types: ", ', '.join(proj_codes))
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame,
projection=projection)
else: #if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1., 1.) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__('pc')
if type(sip_distortion) != bool:
raise ValueError("sip_distortion must be set to True or False.")
if (sip_distortion is True) & (type(degree) != int):
raise ValueError("If sip_distorion is True, an integer value for " +
"the polynomial order must be provided.")
# set pixel_shape to span of input points
wcs.pixel_shape = (max(xp)-min(xp), max(yp)-min(yp))
# determine CRVAL from input
close = lambda l, p: p[np.where(np.abs(l) == min(np.abs(l)))[0][0]]
if str(proj_point) == 'center': # use center of input points
wcs.wcs.crval = ((max(lon)+min(lon))/2., (max(lat)+min(lat))/2.)
wcs.wcs.crpix = ((max(xp)+min(xp))/2., (max(yp)+min(yp))/2.)
elif proj_point is not None: # convert units, initial guess for crpix
proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (close(lon-wcs.wcs.crval[0], xp),
close(lon-wcs.wcs.crval[1], yp))
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
p0 = np.concatenate([[1., 0., 0., 1], wcs.wcs.crpix.flatten()])
fit = least_squares(_linear_wcs_fit, p0,
args=(lon, lat, xp, yp, wcs))
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_distortion:
if '-SIP' not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + '-SIP' for x in wcs.wcs.ctype]
coef_names = ['{0}_{1}'.format(i, j) for i in range(degree+1)
for j in range(degree+1) if (i+j) < (degree+1) and
(i+j) > 1]
p0 = np.concatenate((np.array(wcs.wcs.crpix), wcs.wcs.cd.flatten(),
np.zeros(2*len(coef_names))))
fit = least_squares(_sip_fit, p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names))
coef_fit = (list(fit.x[6:6+len(coef_names)]),
list(fit.x[6+len(coef_names):]))
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree+1, degree+1))
b_vals = np.zeros((degree+1, degree+1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(a_vals, b_vals, np.zeros((degree+1, degree+1)),
np.zeros((degree+1, degree+1)), wcs.wcs.crpix)
return wcs
|
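A minimal usage sketch, under assumptions: the function is importable as shown (a similar helper ships in `astropy.wcs.utils` with a slightly different signature), and four matched points are enough for the linear fit when `sip_distortion=False`:

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord

xp = np.array([10.0, 200.0, 350.0, 480.0])
yp = np.array([15.0, 180.0, 360.0, 495.0])
world = SkyCoord(ra=[10.00, 10.05, 10.10, 10.15] * u.deg,
                 dec=[41.00, 41.05, 41.10, 41.15] * u.deg, frame='icrs')
wcs = fit_wcs_from_points((xp, yp), world, proj_point='center',
                          projection='TAN', sip_distortion=False)
print(wcs.wcs.crval)   # fitted reference point, in degrees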
30,605 |
def convert_dict_snake_to_camel(dic: dict) -> dict:
"""Convert a dictionary of snake case to camel case.
Args:
dic: The dictionary that we would like to convert.
Returns:
converted dictionary.
"""
context_dict = {}
for snake_str in dic:
if type(dic[snake_str]) is dict:
inner_dict = convert_dict_snake_to_camel(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
elif snake_str == 'id' or snake_str == "Id":
context_dict['ID'] = dic.get(snake_str, '')
else:
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = dic.get(snake_str, '')
return context_dict
|
def convert_dict_snake_to_camel(dic: dict) -> dict:
"""Convert a dictionary of snake case to camel case.
Args:
dic: The dictionary that we would like to convert.
Returns:
converted dictionary.
"""
context_dict = {}
for snake_str in dic:
if type(dic[snake_str]) is dict:
inner_dict = convert_dict_snake_to_camel(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
        elif snake_str in ['id', 'Id']:
context_dict['ID'] = dic.get(snake_str, '')
else:
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = dic.get(snake_str, '')
return context_dict
|
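A usage sketch for the converter. `convert_snake_to_camel` is not shown in this pair, so a plausible stand-in is assumed here:

def convert_snake_to_camel(snake_str: str) -> str:
    # Assumed stand-in for the helper referenced above.
    return ''.join(part.title() for part in snake_str.split('_'))

print(convert_dict_snake_to_camel(
    {'id': 7, 'host_name': 'srv01', 'meta_data': {'created_at': '2021-01-01'}}))
# {'ID': 7, 'HostName': 'srv01', 'MetaData': {'CreatedAt': '2021-01-01'}}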
4,462 |
def _get_montage_information(eeg, get_pos):
"""Get channel name, type and montage information from ['chanlocs']."""
ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list()
unknown_types = dict()
for chanloc in eeg.chanlocs:
# channel name
ch_names.append(chanloc['labels'])
# channel type
ch_type = 'eeg'
try_type = chanloc.get('type', None)
if isinstance(try_type, str):
try_type = try_type.strip().lower()
if try_type in _PICK_TYPES_KEYS:
ch_type = try_type
else:
if try_type in unknown_types:
unknown_types[try_type].append(chanloc['labels'])
else:
unknown_types[try_type] = [chanloc['labels']]
ch_types.append(ch_type)
# channel loc
if get_pos:
loc_x = _to_loc(chanloc['X'])
loc_y = _to_loc(chanloc['Y'])
loc_z = _to_loc(chanloc['Z'])
locs = np.r_[-loc_y, loc_x, loc_z]
if not np.any(np.isnan(locs)):
pos_ch_names.append(chanloc['labels'])
pos.append(locs)
# warn if unknown types were provided
if len(unknown_types):
warn('Unknown types found, setting as type EEG:\n' +
'\n'.join([f'{key}: {sorted(unknown_types[key])}'
for key in sorted(unknown_types)]))
lpa, rpa, nasion = None, None, None
if (hasattr(eeg, "nodatchans") and
"nodatchans" in eeg.chaninfo and
len(eeg.chaninfo['nodatchans'])):
for item in list(zip(*eeg.chaninfo['nodatchans'].values())):
d = dict(zip(eeg.chaninfo['nodatchans'].keys(), item))
if d["type"] != 'FID':
continue
if d['description'] == 'Nasion':
nasion = np.array([d["X"], d["Y"], d["Z"]])
if d['description'] == 'Right periauricular point':
rpa = np.array([d["X"], d["Y"], d["Z"]])
if d['description'] == 'Left periauricular point':
lpa = np.array([d["X"], d["Y"], d["Z"]])
if pos_ch_names:
montage = make_dig_montage(
ch_pos=dict(zip(ch_names, np.array(pos))),
coord_frame='head', lpa=lpa, rpa=rpa, nasion=nasion)
else:
montage = None
return ch_names, ch_types, montage
|
def _get_montage_information(eeg, get_pos):
"""Get channel name, type and montage information from ['chanlocs']."""
ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list()
unknown_types = dict()
for chanloc in eeg.chanlocs:
# channel name
ch_names.append(chanloc['labels'])
# channel type
ch_type = 'eeg'
try_type = chanloc.get('type', None)
if isinstance(try_type, str):
try_type = try_type.strip().lower()
if try_type in _PICK_TYPES_KEYS:
ch_type = try_type
else:
if try_type in unknown_types:
unknown_types[try_type].append(chanloc['labels'])
else:
unknown_types[try_type] = [chanloc['labels']]
ch_types.append(ch_type)
# channel loc
if get_pos:
loc_x = _to_loc(chanloc['X'])
loc_y = _to_loc(chanloc['Y'])
loc_z = _to_loc(chanloc['Z'])
locs = np.r_[-loc_y, loc_x, loc_z]
if not np.any(np.isnan(locs)):
pos_ch_names.append(chanloc['labels'])
pos.append(locs)
# warn if unknown types were provided
if len(unknown_types):
warn('Unknown types found, setting as type EEG:\n' +
'\n'.join([f'{key}: {sorted(unknown_types[key])}'
for key in sorted(unknown_types)]))
lpa, rpa, nasion = None, None, None
if (hasattr(eeg, "nodatchans") and
"nodatchans" in eeg.chaninfo and
eeg.chaninfo['nodatchans']):
for item in list(zip(*eeg.chaninfo['nodatchans'].values())):
d = dict(zip(eeg.chaninfo['nodatchans'].keys(), item))
if d["type"] != 'FID':
continue
if d['description'] == 'Nasion':
nasion = np.array([d["X"], d["Y"], d["Z"]])
if d['description'] == 'Right periauricular point':
rpa = np.array([d["X"], d["Y"], d["Z"]])
if d['description'] == 'Left periauricular point':
lpa = np.array([d["X"], d["Y"], d["Z"]])
if pos_ch_names:
montage = make_dig_montage(
ch_pos=dict(zip(ch_names, np.array(pos))),
coord_frame='head', lpa=lpa, rpa=rpa, nasion=nasion)
else:
montage = None
return ch_names, ch_types, montage
|
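The only change in this pair swaps `len(eeg.chaninfo['nodatchans'])` for plain truthiness. For the list/dict containers this field is expected to hold, the two checks agree, which is the assumption behind the simplification:

for value in ([], {}, (), [1], {'a': 1}, (0,)):
    assert bool(value) == (len(value) > 0)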
43,168 |
def to_cugraph(g):
"""Convert a DGL graph to a cugraph.Graph and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
def to_cugraph(g):
"""Convert a DGL graph to a :class:`cugraph.Graph` and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
43,831 |
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
    Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
    With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
    Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
    With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
56,938 |
def command(
*,
name: str = MISSING,
description: str = MISSING,
nsfw: bool = False,
extras: dict = MISSING,
) -> Callable[[CommandCallback[GroupT, P, T]], Command[GroupT, P, T]]:
"""Creates an application command from a regular function.
Parameters
------------
name: :class:`str`
The name of the application command. If not given, it defaults to a lower-case
version of the callback name.
description: :class:`str`
The description of the application command. This shows up in the UI to describe
the application command. If not given, it defaults to the first line of the docstring
of the callback shortened to 100 characters.
nsfw: :class:`bool`
Whether the command is NSFW and should only work in NSFW channels. Defaults to ``False``.
Due to a Discord limitation, this does not work on subcommands.
extras: :class:`dict`
A dictionary of user provided extras to attach to this command.
"""
def decorator(func: CommandCallback[GroupT, P, T]) -> Command[GroupT, P, T]:
if not inspect.iscoroutinefunction(func):
raise TypeError('command function must be a coroutine function')
if description is MISSING:
if func.__doc__ is None:
desc = '…'
else:
desc = _shorten(func.__doc__)
else:
desc = description
return Command(
name=name if name is not MISSING else func.__name__,
description=desc,
callback=func,
parent=None,
nsfw=nsfw,
extras=extras,
)
return decorator
|
def command(
*,
name: str = MISSING,
description: str = MISSING,
nsfw: bool = False,
extras: dict = MISSING,
) -> Callable[[CommandCallback[GroupT, P, T]], Command[GroupT, P, T]]:
"""Creates an application command from a regular function.
Parameters
------------
name: :class:`str`
The name of the application command. If not given, it defaults to a lower-case
version of the callback name.
description: :class:`str`
The description of the application command. This shows up in the UI to describe
the application command. If not given, it defaults to the first line of the docstring
of the callback shortened to 100 characters.
nsfw: :class:`bool`
Whether the command is NSFW and should only work in NSFW channels. Defaults to ``False``.
Due to a Discord limitation, this does not work on subcommands.
extras: :class:`dict`
A dictionary that can be used to store extraneous data.
The library will not touch any values or keys within this dictionary.
"""
def decorator(func: CommandCallback[GroupT, P, T]) -> Command[GroupT, P, T]:
if not inspect.iscoroutinefunction(func):
raise TypeError('command function must be a coroutine function')
if description is MISSING:
if func.__doc__ is None:
desc = '…'
else:
desc = _shorten(func.__doc__)
else:
desc = description
return Command(
name=name if name is not MISSING else func.__name__,
description=desc,
callback=func,
parent=None,
nsfw=nsfw,
extras=extras,
)
return decorator
|
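An illustrative use of the decorator, assuming it is exposed the way discord.py's `app_commands.command` is; the interaction handling is only sketched:

@command(name='ping', description='Check that the bot is alive.',
         extras={'category': 'utility'})
async def ping(interaction):
    # `extras` is user data only; the library does not inspect it.
    await interaction.response.send_message('pong')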
45,993 |
def bihome_loss(
patch_1: torch.Tensor,
patch_2: torch.Tensor,
delta_hat_12: torch.Tensor,
delta_hat_21: torch.Tensor,
triplet_mu: float,
loss_network: nn.Module,
) -> torch.Tensor:
r"""biHomE loss implementation.
Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE.
Args:
patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
delta_hat_12: predicted corner differences from image 1 to image 2 with shape
:math:`(B, 4, 2)`, where B = batch size.
delta_hat_21: predicted corner differences from image 2 to image 1 with shape
:math:`(B, 4, 2)`, where B = batch size.
triplet_mu: Homography matrix regularization weight.
loss_network: loss network used.
Return:
the computed loss.
"""
if not isinstance(patch_1, torch.Tensor):
raise TypeError(f"patch_1 type is not a torch.Tensor. Got {type(patch_1)}")
if not len(patch_1.shape) == 4:
raise ValueError(f"Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}")
if not isinstance(patch_2, torch.Tensor):
raise TypeError(f"patch_2 type is not a torch.Tensor. Got {type(patch_2)}")
if not len(patch_2.shape) == 4:
raise ValueError(f"Invalid input shape of patch_2, we expect BxCxHxW. Got: {patch_2.shape}")
if patch_1.shape != patch_2.shape:
raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).')
if not isinstance(delta_hat_12, torch.Tensor):
raise TypeError(f"delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}")
if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}")
if not delta_hat_12.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(delta_hat_21, torch.Tensor):
raise TypeError(f"delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}")
if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}")
if not delta_hat_21.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(loss_network, nn.Module):
raise TypeError(f"loss_network type is not a str. Got {type(loss_network)}")
# Compute features of both patches
patch_1_f = loss_network(patch_1)
patch_2_f = loss_network(patch_2)
# Warp patch 1 with delta hat_12
patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12)
patch_1_prime_f = loss_network(patch_1_prime)
# Warp patch 2 with delta hat_21
patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21)
patch_2_prime_f = loss_network(patch_2_prime)
# Create and warp masks
patch_1_m = torch.ones_like(patch_1)
patch_2_m = torch.ones_like(patch_2)
patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12)
patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21)
# Mask size mismatch downsampling
_, _, f_h, _ = patch_1_prime_f.shape
downsample_factor = patch_1_m.shape[-1] // f_h
downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0)
patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1)
patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1)
patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1)
patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1)
# Triplet Margin Loss
l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1)
l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1)
l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1)
ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1)
ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1)
ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den))
ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1)
ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1)
ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den))
ln1 = torch.sum(ln1_nom / ln1_den)
ln2 = torch.sum(ln2_nom / ln2_den)
# Regularization
batch_size = patch_1.size(0)
eye = torch.eye(3, dtype=h1.dtype, device=h1.device).unsqueeze(dim=0).repeat(batch_size, 1, 1)
ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu
loss = ln1 + ln2 + ln3
return loss
|
def bihome_loss(
patch_1: torch.Tensor,
patch_2: torch.Tensor,
delta_hat_12: torch.Tensor,
delta_hat_21: torch.Tensor,
triplet_mu: float,
features_fcn: Optional[Callable] = None,
) -> torch.Tensor:
r"""biHomE loss implementation.
Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE.
Args:
patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
delta_hat_12: predicted corner differences from image 1 to image 2 with shape
:math:`(B, 4, 2)`, where B = batch size.
delta_hat_21: predicted corner differences from image 2 to image 1 with shape
:math:`(B, 4, 2)`, where B = batch size.
triplet_mu: Homography matrix regularization weight.
        features_fcn: function used to extract feature maps from the patches.
Return:
the computed loss.
"""
if not isinstance(patch_1, torch.Tensor):
raise TypeError(f"patch_1 type is not a torch.Tensor. Got {type(patch_1)}")
if not len(patch_1.shape) == 4:
raise ValueError(f"Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}")
if not isinstance(patch_2, torch.Tensor):
raise TypeError(f"patch_2 type is not a torch.Tensor. Got {type(patch_2)}")
if not len(patch_2.shape) == 4:
raise ValueError(f"Invalid input shape of patch_2, we expect BxCxHxW. Got: {patch_2.shape}")
if patch_1.shape != patch_2.shape:
raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).')
if not isinstance(delta_hat_12, torch.Tensor):
raise TypeError(f"delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}")
if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}")
if not delta_hat_12.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(delta_hat_21, torch.Tensor):
raise TypeError(f"delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}")
if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}")
if not delta_hat_21.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
    if features_fcn is None or not callable(features_fcn):
        raise TypeError(f"features_fcn must be a callable feature extractor. Got {type(features_fcn)}")
    # Compute features of both patches
    patch_1_f = features_fcn(patch_1)
    patch_2_f = features_fcn(patch_2)
    # Warp patch 1 with delta hat_12
    patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12)
    patch_1_prime_f = features_fcn(patch_1_prime)
    # Warp patch 2 with delta hat_21
    patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21)
    patch_2_prime_f = features_fcn(patch_2_prime)
# Create and warp masks
patch_1_m = torch.ones_like(patch_1)
patch_2_m = torch.ones_like(patch_2)
patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12)
patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21)
# Mask size mismatch downsampling
_, _, f_h, _ = patch_1_prime_f.shape
downsample_factor = patch_1_m.shape[-1] // f_h
downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0)
patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1)
patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1)
patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1)
patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1)
# Triplet Margin Loss
l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1)
l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1)
l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1)
ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1)
ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1)
ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den))
ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1)
ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1)
ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den))
ln1 = torch.sum(ln1_nom / ln1_den)
ln2 = torch.sum(ln2_nom / ln2_den)
# Regularization
batch_size = patch_1.size(0)
eye = torch.eye(3, dtype=h1.dtype, device=h1.device).unsqueeze(dim=0).repeat(batch_size, 1, 1)
ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu
loss = ln1 + ln2 + ln3
return loss
|
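The `ln3` regularization term above drives `h1 @ h2` toward the identity matrix; a tiny self-contained illustration of that penalty with placeholder homographies:

import torch

h1 = torch.eye(3).unsqueeze(0)    # stand-in forward homography
h2 = torch.eye(3).unsqueeze(0)    # stand-in inverse homography
eye = torch.eye(3).unsqueeze(0)
penalty = torch.sum((torch.matmul(h1, h2) - eye) ** 2)
assert penalty.item() == 0.0      # a perfectly consistent pair incurs no penalty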
13,884 |
def pytest_generate_tests(metafunc):
"""generate a list of all available integration tests."""
is_windows = platform.system() == "Windows"
global skip_clean
skip_clean = metafunc.config.getoption("skip_clean")
generate_reference = metafunc.config.getoption("generate_reference")
update_reference = metafunc.config.getoption("update_reference")
archive_differences = metafunc.config.getoption("archive_differences")
collected_params = []
if archive_differences: # pragma: no cover
diffs_zip = os.path.join(basedir, "diff.zip")
# Create an empty ZIP
zipfile.ZipFile(diffs_zip, mode="w").close()
for name in findtests(basedir):
targets = parse_makefile_for_available_targets(
os.path.join(basedir, name, "Makefile")
)
# check that the "run" target lists no unknown formats
target_run = targets.get("run", set())
unknown_formats = target_run.difference(KNOWN_FORMATS)
if unknown_formats:
raise ValueError(
"{}/Makefile target 'run' references unknown format {}".format(
name, unknown_formats
)
)
# check that all "run" targets are actually available
unresolved_prereqs = target_run.difference(targets)
if unresolved_prereqs:
raise ValueError(
"{}/Makefile target 'run' has unresolved prerequisite {}".format(
name, unresolved_prereqs
)
)
# check that all available known formats are also listed in the "run" target
unreferenced_formats = (
set(KNOWN_FORMATS).intersection(targets).difference(target_run)
)
if unreferenced_formats:
raise ValueError(
"{}/Makefile target 'run' doesn't reference available target {}".format(
name, unreferenced_formats
)
)
for format in KNOWN_FORMATS:
# only test formats where the Makefile provides a target
if format not in targets:
continue
needs_symlinks = any(
[
name == "linked" and format == "html",
name == "filter-relative-lib",
name == "filter-relative-lib-from-unfiltered-tracefile",
]
)
marks = [
pytest.mark.xfail(
needs_symlinks and is_windows,
reason="have yet to figure out symlinks on Windows",
),
pytest.mark.xfail(
name == "exclude-throw-branches"
and format == "html"
and is_windows,
reason="branch coverage details seem to be platform-dependent",
),
pytest.mark.xfail(
name == "rounding" and is_windows,
reason="branch coverage seem to be platform-dependent",
),
]
collected_params.append(
pytest.param(
name,
format,
targets,
generate_reference,
update_reference,
archive_differences,
marks=marks,
id="-".join([name, format]),
)
)
metafunc.parametrize(
"name, format, available_targets, generate_reference, update_reference, archive_differences",
collected_params,
indirect=False,
scope="module",
)
|
def pytest_generate_tests(metafunc):
"""generate a list of all available integration tests."""
is_windows = platform.system() == "Windows"
global skip_clean
skip_clean = metafunc.config.getoption("skip_clean")
generate_reference = metafunc.config.getoption("generate_reference")
update_reference = metafunc.config.getoption("update_reference")
archive_differences = metafunc.config.getoption("archive_differences")
collected_params = []
if archive_differences: # pragma: no cover
diffs_zip = os.path.join(basedir, "diff.zip")
# Create an empty ZIP
zipfile.ZipFile(diffs_zip, mode="w").close()
for name in findtests(basedir):
targets = parse_makefile_for_available_targets(
os.path.join(basedir, name, "Makefile")
)
# check that the "run" target lists no unknown formats
target_run = targets.get("run", set())
unknown_formats = target_run.difference(KNOWN_FORMATS)
if unknown_formats:
raise ValueError(
"{}/Makefile target 'run' references unknown format {}".format(
name, unknown_formats
)
)
# check that all "run" targets are actually available
unresolved_prereqs = target_run.difference(targets)
if unresolved_prereqs:
raise ValueError(
"{}/Makefile target 'run' has unresolved prerequisite {}".format(
name, unresolved_prereqs
)
)
# check that all available known formats are also listed in the "run" target
unreferenced_formats = (
set(KNOWN_FORMATS).intersection(targets).difference(target_run)
)
if unreferenced_formats: # pragma: no cover
raise ValueError(
"{}/Makefile target 'run' doesn't reference available target {}".format(
name, unreferenced_formats
)
)
for format in KNOWN_FORMATS:
# only test formats where the Makefile provides a target
if format not in targets:
continue
needs_symlinks = any(
[
name == "linked" and format == "html",
name == "filter-relative-lib",
name == "filter-relative-lib-from-unfiltered-tracefile",
]
)
marks = [
pytest.mark.xfail(
needs_symlinks and is_windows,
reason="have yet to figure out symlinks on Windows",
),
pytest.mark.xfail(
name == "exclude-throw-branches"
and format == "html"
and is_windows,
reason="branch coverage details seem to be platform-dependent",
),
pytest.mark.xfail(
name == "rounding" and is_windows,
reason="branch coverage seem to be platform-dependent",
),
]
collected_params.append(
pytest.param(
name,
format,
targets,
generate_reference,
update_reference,
archive_differences,
marks=marks,
id="-".join([name, format]),
)
)
metafunc.parametrize(
"name, format, available_targets, generate_reference, update_reference, archive_differences",
collected_params,
indirect=False,
scope="module",
)
|
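The marks built above rely on conditional `xfail`; a minimal standalone equivalent of that pattern looks like this:

import platform
import pytest

@pytest.mark.xfail(platform.system() == "Windows",
                   reason="have yet to figure out symlinks on Windows")
def test_symlink_behaviour():
    assert True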
43,267 |
def test_normalized_laplacian():
Aadj = pytest.G.to_adjacency_matrix()
laplacian = normalized_laplacian(Aadj).todense()
eigenvalues, _ = np.linalg.eig(laplacian)
# min eigenvalue of normalized laplacian is 0
# max eigenvalue of normalized laplacian is <= 2
assert 0 == pytest.approx(eigenvalues.min(), abs=1e-7)
assert (2 + 1e-7) >= eigenvalues.max()
assert laplacian.shape == Aadj.get_shape()
laplacian = normalized_laplacian(Aadj, symmetric=False)
assert 1 == pytest.approx(laplacian.sum(), abs=1e-7)
assert laplacian.get_shape() == Aadj.get_shape()
|
def test_normalized_laplacian():
Aadj = pytest.G.to_adjacency_matrix()
laplacian = normalized_laplacian(Aadj).todense()
eigenvalues, _ = np.linalg.eig(laplacian)
# min eigenvalue of normalized laplacian is 0
# max eigenvalue of normalized laplacian is <= 2
assert eigenvalues.min() == pytest.approx(0, abs=1e-7)
assert (2 + 1e-7) >= eigenvalues.max()
assert laplacian.shape == Aadj.get_shape()
laplacian = normalized_laplacian(Aadj, symmetric=False)
assert 1 == pytest.approx(laplacian.sum(), abs=1e-7)
assert laplacian.get_shape() == Aadj.get_shape()
|
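The change in this pair follows the pytest convention of writing `actual == approx(expected)`, so the tolerance is attached to the expected value; a minimal illustration:

from pytest import approx

measured = 1e-9
assert measured == approx(0, abs=1e-7)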
58,566 |
def pip_run(build_ext):
build(True, BUILD_JAVA, True)
if setup_spec.type == SetupType.RAY:
setup_spec.files_to_include += ray_files
# We also need to install pickle5 along with Ray, so make sure that the
# relevant non-Python pickle5 files get copied.
pickle5_dir = os.path.join(ROOT_DIR, PICKLE5_SUBDIR)
setup_spec.files_to_include += walk_directory(
os.path.join(pickle5_dir, "pickle5"))
thirdparty_dir = os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
setup_spec.files_to_include += walk_directory(thirdparty_dir)
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
setup_spec.files_to_include.append(
os.path.join(directory, filename))
copied_files = 0
for filename in setup_spec.files_to_include:
copied_files += copy_file(build_ext.build_lib, filename, ROOT_DIR)
if sys.platform == 'win32':
print('copy_file uses', build_ext.build_lib, ROOT_DIR)
shutil.copy(r'c:\Windows\system32\msvcp140.dll',
os.path.join(build_ext.build_lib, 'ray'))
copied_files += 1
print("# of files copied to {}: {}".format(build_ext.build_lib,
copied_files))
|
def pip_run(build_ext):
build(True, BUILD_JAVA, True)
if setup_spec.type == SetupType.RAY:
setup_spec.files_to_include += ray_files
# We also need to install pickle5 along with Ray, so make sure that the
# relevant non-Python pickle5 files get copied.
pickle5_dir = os.path.join(ROOT_DIR, PICKLE5_SUBDIR)
setup_spec.files_to_include += walk_directory(
os.path.join(pickle5_dir, "pickle5"))
thirdparty_dir = os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
setup_spec.files_to_include += walk_directory(thirdparty_dir)
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
setup_spec.files_to_include.append(
os.path.join(directory, filename))
copied_files = 0
for filename in setup_spec.files_to_include:
copied_files += copy_file(build_ext.build_lib, filename, ROOT_DIR)
if sys.platform == 'win32':
print("copy_file uses", build_ext.build_lib, ROOT_DIR)
shutil.copy(r"c:\Windows\system32\msvcp140.dll",
os.path.join(build_ext.build_lib, "ray"))
copied_files += 1
print("# of files copied to {}: {}".format(build_ext.build_lib,
copied_files))
|
21,815 |
def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts and do some basic validation.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
host, port = parse_server_name(server_name)
# these tests don't need to be bulletproof as we'll find out soon enough
# if somebody is giving us invalid data. What we *do* need is to be sure
# that nobody is sneaking IP literals in that look like hostnames, etc.
# look for ipv6 literals
if not host:
raise ValueError(f"Server name '{server_name}' has an invalid format.")
if host[0] == "[":
if host[-1] != "]":
raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))
# valid_ipv6 raises when given an empty string
ipv6_address = host[1:-1]
if not ipv6_address or not valid_ipv6(ipv6_address):
raise ValueError(
"Server name '%s' is not a valid IPv6 address" % (server_name,)
)
elif not VALID_HOST_REGEX.match(host):
raise ValueError("Server name '%s' has an invalid format" % (server_name,))
return host, port
|
def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts and do some basic validation.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
host, port = parse_server_name(server_name)
# these tests don't need to be bulletproof as we'll find out soon enough
# if somebody is giving us invalid data. What we *do* need is to be sure
# that nobody is sneaking IP literals in that look like hostnames, etc.
# look for ipv6 literals
if not host:
raise ValueError("Server name '%s' has an invalid format" % (server_name,))
if host[0] == "[":
if host[-1] != "]":
raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))
# valid_ipv6 raises when given an empty string
ipv6_address = host[1:-1]
if not ipv6_address or not valid_ipv6(ipv6_address):
raise ValueError(
"Server name '%s' is not a valid IPv6 address" % (server_name,)
)
elif not VALID_HOST_REGEX.match(host):
raise ValueError("Server name '%s' has an invalid format" % (server_name,))
return host, port
|
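Two quick sanity checks for the validator, assuming `parse_server_name` splits on the final colon and keeps the brackets of IPv6 literals (which is what the `host[0] == "["` check above relies on):

host, port = parse_and_validate_server_name("matrix.example.com:8448")
assert (host, port) == ("matrix.example.com", 8448)
host, port = parse_and_validate_server_name("[2001:db8::1]:443")
assert (host, port) == ("[2001:db8::1]", 443)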
27,856 |
def backward(outputs, grad_outputs=None, **kwargs):
"""backward(outputs, grad_outputs=None, *, enable_double_backprop=False)
Runs backpropagation from variables simultaneously.
.. warning::
This feature is experimental. The interface can change in the future.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
grad_outputs (None or tuple or list of :class:`~chainer.Variable`):
A sequence of variables that gives the initial value of each output
gradient.
If this argument is ``None``, backprop uses
:attr:`~chainer.Variable.grad_var` of ``outputs``.
enable_double_backprop (bool): If ``True``,
computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients. Note that
enabling it results in larger memory consumption needed to
store the gradients w.r.t intermediate variables that are
required for the second gradient computation.
.. seealso::
:meth:`chainer.Variable.backward`
:func:`chainer.grad`
"""
enable_double_backprop, = argument.parse_kwargs(
kwargs, ('enable_double_backprop', False),
retain_grad='semantics for retain_grad=True is under discussion',
loss_scale='chainer.backward does not support loss_scale option',
)
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
for v in outputs:
if not isinstance(v, chainer.Variable):
raise TypeError(
'each output must be a Variable, not {}'.format(type(v)))
if grad_outputs is not None:
if not isinstance(grad_outputs, (tuple, list)):
raise TypeError(
'grad_outputs must be None, a tuple, or a list, not {}.'
.format(type(grad_outputs)))
if len(outputs) != len(grad_outputs):
raise ValueError(
'grad_outputs must be of the same length as outputs.\n'
'len(outputs) = {}, len(grad_outputs) = {}'
.format(len(outputs), len(grad_outputs)))
is_chainerx = [v._has_chainerx_array for v in outputs]
if any(is_chainerx):
if not all(is_chainerx):
# The restriction is required as soon as the workarounds below
# are removed.
raise ValueError('cannot mix chainerx and other backends')
# Cannot use chainerx.backward directly, because it does not follow
# retain_grad=False
# TODO(kataoka): Fix chainerx.backward and remove this workaround
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_outputs.append(y.grad_var)
y.grad_var = None
# The check is required because chainerx.backward sets default grads.
# TODO(kataoka): Fix chainerx.backward and remove this workaround
indices = [i for i, gy in enumerate(grad_outputs) if gy is not None]
outputs = [outputs[i] for i in indices]
grad_outputs = [grad_outputs[i] for i in indices]
# Use new variables to start backprop
# TODO(kataoka): Implement chainerx.backward(output, grad_outputs)
# and remove this workaround.
outputs = chainer.functions.identity(*outputs)
if not isinstance(outputs, tuple):
outputs = outputs,
grad_outputs = chainer.functions.identity(*grad_outputs)
if not isinstance(grad_outputs, tuple):
grad_outputs = grad_outputs,
# TODO(kataoka): Even after F.identity, non-float grad cannot be set.
# Move the check to elsewhere and remove this workaround.
outputs_ = []
for y, gy in zip(outputs, grad_outputs):
if not y.requires_grad and gy is not None:
warnings.warn(
'Some of grads are ignored by chainer.backward.\n'
'backend: ChainerX, '
'output.dtype: {}, grad_output.dtype: {}'.format(
y.dtype, gy.dtype),
RuntimeWarning)
continue
y.grad_var = gy
outputs_.append(y)
outputs = outputs_
del outputs_
# See also the ChainerX case of Variable.backward
arrs = []
for y in outputs:
arr = y._data[0]
assert isinstance(arr, chainerx.ndarray)
arrs.append(arr)
chainerx.backward(
arrs, enable_double_backprop=enable_double_backprop)
return
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_var = y.grad_var
if grad_var is None:
warnings.warn(
'outputs contains a Variable without grad, or '
'duplicate outputs. Note that '
'chainer.backward does not have default grad.',
RuntimeWarning)
y.grad_var = None
grad_outputs.append(grad_var)
outputs = [
(y.node, gy) for y, gy in zip(outputs, grad_outputs) if gy is not None]
with chainer.using_config('enable_backprop', enable_double_backprop):
_backprop_to_all(outputs, False, None)
|
def backward(outputs, grad_outputs=None, **kwargs):
"""backward(outputs, grad_outputs=None, *, enable_double_backprop=False)
Runs backpropagation from variables simultaneously.
.. warning::
This feature is experimental. The interface can change in the future.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
grad_outputs (None or tuple or list of :class:`~chainer.Variable`):
A sequence of variables that gives the initial value of each output
gradient.
If this argument is ``None``, backprop uses
:attr:`~chainer.Variable.grad_var` of ``outputs``.
enable_double_backprop (bool): If ``True``,
computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients. Note that
enabling it results in larger memory consumption needed to
store the gradients w.r.t intermediate variables that are
required for the second gradient computation.
.. seealso::
:meth:`chainer.Variable.backward`
:func:`chainer.grad`
"""
enable_double_backprop, = argument.parse_kwargs(
kwargs, ('enable_double_backprop', False),
retain_grad='semantics for retain_grad=True is under discussion',
loss_scale='chainer.backward does not support loss_scale option',
)
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
for v in outputs:
if not isinstance(v, chainer.Variable):
raise TypeError(
'each output must be a Variable, not {}'.format(type(v)))
if grad_outputs is not None:
if not isinstance(grad_outputs, (tuple, list)):
raise TypeError(
'grad_outputs must be None, a tuple, or a list, not {}.'
.format(type(grad_outputs)))
if len(outputs) != len(grad_outputs):
raise ValueError(
'grad_outputs must be of the same length as outputs.\n'
'len(outputs) = {}, len(grad_outputs) = {}'
.format(len(outputs), len(grad_outputs)))
is_chainerx = [v._has_chainerx_array for v in outputs]
if any(is_chainerx):
if not all(is_chainerx):
# The restriction is required as soon as the workarounds below
# are removed.
raise ValueError('cannot mix chainerx and other backends')
# Cannot use chainerx.backward directly, because it does not follow
# retain_grad=False
# TODO(kataoka): Fix chainerx.backward and remove this workaround
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_outputs.append(y.grad_var)
y.grad_var = None
# The check is required because chainerx.backward sets default grads.
# TODO(kataoka): Fix chainerx.backward and remove this workaround
indices = [i for i, gy in enumerate(grad_outputs) if gy is not None]
outputs = [outputs[i] for i in indices]
grad_outputs = [grad_outputs[i] for i in indices]
# Use new variables to start backprop
# TODO(kataoka): Implement chainerx.backward(output, grad_outputs)
# and remove this workaround.
outputs = chainer.functions.identity(*outputs)
if not isinstance(outputs, tuple):
outputs = outputs,
grad_outputs = chainer.functions.identity(*grad_outputs)
if not isinstance(grad_outputs, tuple):
grad_outputs = grad_outputs,
# TODO(kataoka): Even after F.identity, non-float grad cannot be set.
# Move the check to elsewhere and remove this workaround.
outputs_ = []
for y, gy in zip(outputs, grad_outputs):
if not y.requires_grad and gy is not None:
warnings.warn(
'Some of grads are ignored by chainer.backward.\n'
'backend: ChainerX, '
'output.dtype: {}, grad_output.dtype: {}'.format(
y.dtype, gy.dtype),
RuntimeWarning)
continue
y.grad_var = gy
outputs_.append(y)
outputs = outputs_
del outputs_
# See also the ChainerX case of Variable.backward
arrs = []
for y in outputs:
arr = y._data[0]
assert isinstance(arr, chainerx.ndarray)
arrs.append(arr)
chainerx.backward(
arrs, enable_double_backprop=enable_double_backprop)
return
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_var = y.grad_var
if grad_var is None:
warnings.warn(
'outputs contains a Variable without grad, or '
'duplicate outputs. Note that '
'chainer.backward does not set default grad.',
RuntimeWarning)
y.grad_var = None
grad_outputs.append(grad_var)
outputs = [
(y.node, gy) for y, gy in zip(outputs, grad_outputs) if gy is not None]
with chainer.using_config('enable_backprop', enable_double_backprop):
_backprop_to_all(outputs, False, None)
|
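A minimal usage sketch for chainer.backward as defined above, assuming chainer and numpy are installed; the variable names and shapes are illustrative only:

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([1.0, 2.0], dtype=np.float32))
y = F.sum(x * x)                    # scalar output
y.grad = np.ones_like(y.array)      # initial output gradient, read via grad_var
chainer.backward([y])
print(x.grad)                       # [2. 4.], i.e. d(sum(x*x))/dx = 2x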
31,580 |
def get_useragents_command(client, args):
ip_address = args.get('ipaddress')
page = int(args.get('page', 1))
params = {
"page": page
}
res = client.get_useragents(ip_address=ip_address, params=params)
records = res.get('records', [])
record_count = res.get('record_count', 0)
table_data = [{
"User Agent": x.get('user_agent'),
"OS Name": x.get('os', {}).get('name'),
"OS Platform": x.get('os', {}).get('platform'),
"OS Version": x.get('os', {}).get('version'),
"Browser Family": x.get('browser_family'),
"Last Seen": x.get('lastseen'),
"Device Type": x.get('device', {}).get('type'),
"Device Brand": x.get('device', {}).get('brand'),
"Device Model": x.get('device', {}).get('model'),
"Client Type": x.get('client', {}).get('type'),
"Client Name": x.get('client', {}).get('name'),
"Client Version": x.get('client', {}).get('version'),
"Client Engine": x.get('client', {}).get('engine'),
"Client Engine Verison": x.get('client', {}).get('engine_version'),
} for x in records]
md = tableToMarkdown(f"User Agents for {ip_address}:", table_data, [
'User Agent',
'OS Name',
'OS Platform',
'OS Version',
'Browser Family',
'Last Seen',
'Device Type',
'Device Brand',
'Device Model',
'Client Type',
'Client Name',
'Client Version',
'Client Engine',
'Client Engine Version'
])
output_data = {
"ip": ip_address,
"useragents": records,
"useragent_records_count": record_count
}
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP",
outputs_key_field="ip",
outputs=output_data,
readable_output=md
)
return_results(command_results)
|
def get_useragents_command(client, args):
ip_address = args.get('ipaddress')
page = int(args.get('page', 1))
params = {
"page": page
}
res = client.get_useragents(ip_address=ip_address, params=params)
records = res.get('records', [])
record_count = res.get('record_count', 0)
table_data = [{
"User Agent": x.get('user_agent'),
"OS Name": x.get('os', {}).get('name'),
"OS Platform": x.get('os', {}).get('platform'),
"OS Version": x.get('os', {}).get('version'),
"Browser Family": x.get('browser_family'),
"Last Seen": x.get('lastseen'),
"Device Type": x.get('device', {}).get('type'),
"Device Brand": x.get('device', {}).get('brand'),
"Device Model": x.get('device', {}).get('model'),
"Client Type": x.get('client', {}).get('type'),
"Client Name": x.get('client', {}).get('name'),
"Client Version": x.get('client', {}).get('version'),
"Client Engine": x.get('client', {}).get('engine'),
"Client Engine Verison": x.get('client', {}).get('engine_version'),
} for x in records]
md = tableToMarkdown(f"User Agents for {ip_address}:", table_data)
output_data = {
"ip": ip_address,
"useragents": records,
"useragent_records_count": record_count
}
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP",
outputs_key_field="ip",
outputs=output_data,
readable_output=md
)
return_results(command_results)
|
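The modified version drops the explicit column list, so tableToMarkdown falls back to the dict keys of the rows. A short sketch of that difference, assuming the XSOAR CommonServerPython helpers are in scope (the row data is made up):

row = {"User Agent": "Mozilla/5.0", "OS Name": "Windows", "Browser Family": "Chrome"}
md_subset = tableToMarkdown("User Agents for 8.8.8.8:", [row], ["User Agent", "OS Name"])
md_all = tableToMarkdown("User Agents for 8.8.8.8:", [row])  # columns come from the dict keys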
31,387 |
def bang_url(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
url = args['url']
raw_response = client.url(
url
)
return build_url_output(client, score_calculator, url, raw_response)
|
def url_command(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
url = args['url']
raw_response = client.url(
url
)
return build_url_output(client, score_calculator, url, raw_response)
|
42,081 |
def _get_skipped_trial_numbers(
trials: List[FrozenTrial], used_param_names: Sequence[str]
) -> Set[int]:
"""Utility function for ``plot_parallel_coordinate``.
If a trial's parameters do not contain a parameter in ``used_param_names``,
``plot_parallel_coordinate`` methods do not use such trials.
Args:
trials:
List of ``FrozenTrials``.
used_param_names:
The parameter names used in ``plot_parallel_coordinate``.
Returns:
A list of invalid trial numbers.
"""
skipped_trial_numbers = set()
for trial in trials:
for used_param in used_param_names:
if used_param not in trial.params.keys():
skipped_trial_numbers.add(trial.number)
break
return skipped_trial_numbers
|
def _get_skipped_trial_numbers(
trials: List[FrozenTrial], used_param_names: Sequence[str]
) -> Set[int]:
"""Utility function for ``plot_parallel_coordinate``.
If a trial's parameters do not contain a parameter in ``used_param_names``,
``plot_parallel_coordinate`` methods do not use such trials.
Args:
trials:
List of ``FrozenTrial``s.
used_param_names:
The parameter names used in ``plot_parallel_coordinate``.
Returns:
A list of invalid trial numbers.
"""
skipped_trial_numbers = set()
for trial in trials:
for used_param in used_param_names:
if used_param not in trial.params.keys():
skipped_trial_numbers.add(trial.number)
break
return skipped_trial_numbers
|
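A small sketch of the skip rule itself, using hypothetical SimpleNamespace stand-ins instead of real FrozenTrial objects:

from types import SimpleNamespace

trials = [
    SimpleNamespace(number=0, params={"x": 1.0, "y": 2.0}),
    SimpleNamespace(number=1, params={"x": 3.0}),   # "y" was never suggested
]
skipped = {t.number for t in trials
           if any(p not in t.params for p in ["x", "y"])}
print(skipped)  # {1}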
5,722 |
def hermite(n, monic=False):
r"""Physicist's Hermite polynomial.
Defined by
.. math::
H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
H : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2}`.
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> p_monic = special.hermite(3, monic=True)
>>> p_monic
poly1d([ 1. , 0. , -1.5, 0. ])
>>> p_monic(1) # Evaluate at a point
-0.49999999999999983
>>> x = np.linspace(-3,3,400) # Choose x in (-3,3)
>>> y = p_monic(x) # Evaluate p_monic at x
>>> plt.plot(x,y) # Generate plot
>>> plt.title("Monic Hermite polynomial of degree 3")
>>> plt.xlabel("x")
>>> plt.ylabel("H_3(x)")
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermite(n1)
wfunc = lambda x: exp(-x * x)
if n == 0:
x, w = [], []
hn = 2**n * _gam(n + 1) * sqrt(pi)
kn = 2**n
p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
lambda x: eval_hermite(n, x))
return p
|
def hermite(n, monic=False):
r"""Physicist's Hermite polynomial.
Defined by
.. math::
H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
H : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2}`.
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> p_monic = special.hermite(3, monic=True)
>>> p_monic
poly1d([ 1. , 0. , -1.5, 0. ])
>>> p_monic(1) # Evaluate at a point
-0.49999999999999983
>>> x = np.linspace(-3, 3, 400)
>>> y = p_monic(x) # Evaluate p_monic at x
>>> plt.plot(x,y) # Generate plot
>>> plt.title("Monic Hermite polynomial of degree 3")
>>> plt.xlabel("x")
>>> plt.ylabel("H_3(x)")
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermite(n1)
wfunc = lambda x: exp(-x * x)
if n == 0:
x, w = [], []
hn = 2**n * _gam(n + 1) * sqrt(pi)
kn = 2**n
p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
lambda x: eval_hermite(n, x))
return p
|
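A quick numerical check of the degree-3 case from the docstring: H_3(x) = 8x**3 - 12x, so the monic form is x**3 - 1.5x (assumes scipy and numpy are installed):

import numpy as np
from scipy import special

H3 = special.hermite(3)
H3_monic = special.hermite(3, monic=True)
print(np.isclose(H3(1.0), 8 - 12))         # True
print(np.isclose(H3_monic(1.0), 1 - 1.5))  # True, matches the -0.4999... value above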
46,544 |
def zero_activation_threshold(spec):
"""
Helper method to use the default balance activation threshold for state creation for tests.
Usage: `@with_custom_state(threshold_fn=one_gwei_activation_threshold, ...)`
"""
return 0
|
def zero_activation_threshold(spec):
"""
Helper method to use the default balance activation threshold for state creation for tests.
Usage: `@with_custom_state(threshold_fn=zero_activation_threshold, ...)`
"""
return 0
|
25,699 |
def test_log_prob():
"""
heteroskedastic likelihood where the variance parameter is alwaData.Ys constant
giving the same answers for variational_expectations, predict_mean_and_var,
etc as the regular Gaussian likelihood
"""
l1 = gpflow.likelihoods.Gaussian(variance=Data.g_var)
l2 = HeteroskedasticTFPDistribution(tfp.distributions.Normal)
np.testing.assert_allclose(
l1.log_prob(Data.f_mean, Data.Y),
l2.log_prob(Data.F2_mean, Data.Y),
)
|
def test_log_prob():
"""
heteroskedastic likelihood where the variance parameter is always constant
giving the same answers for variational_expectations, predict_mean_and_var,
etc as the regular Gaussian likelihood
"""
l1 = gpflow.likelihoods.Gaussian(variance=Data.g_var)
l2 = HeteroskedasticTFPDistribution(tfp.distributions.Normal)
np.testing.assert_allclose(
l1.log_prob(Data.f_mean, Data.Y),
l2.log_prob(Data.F2_mean, Data.Y),
)
|
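Why the two likelihoods above should agree: with a constant variance the heteroskedastic Normal log-density reduces to the ordinary Gaussian one,

\log p(y \mid f) = -\tfrac{1}{2}\log(2\pi\sigma^2) - \frac{(y - f)^2}{2\sigma^2},

so l1.log_prob and l2.log_prob evaluate the same expression, assuming Data.F2_mean pairs the same mean with a constant scale that matches Data.g_var.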
52,015 |
def files_upload_to_callback(instance, filename):
"""upload_to callback for File instances.
It is called automatically when calling save() on a File, since it's an
upload_to callback.
The returned paths are in the format of:
{addon_id}/{addon_name}-{version}.{extension}
By convention, newly signed files after 2022-03-31 get a .xpi extension,
unsigned get .zip. This helps ensure CDN cache is busted when we sign
something.
Note that per Django requirements this gets passed the object instance and
a filename, but the filename is completely ignored here (it's meant to
represent the user-provided filename in user uploads).
"""
parts = []
addon = instance.version.addon
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
name = slugify(addon.name).replace('-', '_') or 'addon'
parts.append(name)
parts.append(instance.version.version)
file_extension = '.xpi' if instance.is_signed else '.zip'
return os.path.join(str(instance.addon.pk), '-'.join(parts) + file_extension)
|
def files_upload_to_callback(instance, filename):
"""upload_to callback for File instances.
It is called automatically when calling save() on a File, since it's an
upload_to callback.
The returned paths are in the format of:
{addon_id}/{addon_name}-{version}.{extension}
By convention, newly signed files after 2022-03-31 get a .xpi extension,
unsigned get .zip. This helps ensure CDN cache is busted when we sign
something.
Note that per Django requirements this gets passed the object instance and
a filename, but the filename is completely ignored here (it's meant to
represent the user-provided filename in user uploads).
"""
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
name = slugify(instance.addon.name).replace('-', '_') or 'addon'
parts = (name, instance.version.version)
file_extension = '.xpi' if instance.is_signed else '.zip'
return os.path.join(str(instance.addon.pk), '-'.join(parts) + file_extension)
|
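A string-only sketch of the resulting upload path, using hypothetical values and assuming slugify('Fancy Addon') yields 'fancy-addon':

import os

name = "fancy-addon".replace("-", "_")                   # 'fancy_addon'
parts = (name, "2.0.1")
print(os.path.join("1234", "-".join(parts) + ".xpi"))    # 1234/fancy_addon-2.0.1.xpi (signed file)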
27,449 |
def migrate_combined_spec(combined_spec, forge_dir, config):
"""CFEP-9 variant migrations
Apply the list of migration configurations to the build (in the correct sequence).
This will be used to change the variant within the list of MetaData instances,
and return the migrated variants.
This has to happen before the final variant files are computed.
The method for application is determined by the variant algebra as defined by CFEP-9
"""
combined_spec = combined_spec.copy()
migrations_root = os.path.join(forge_dir, "migrations", "*.yaml")
migrations = glob.glob(migrations_root)
from .variant_algebra import parse_variant, variant_add
migration_variants = [
(fn, parse_variant(open(fn, "r").read(), config=config))
for fn in migrations
]
migration_variants.sort(
key=lambda fn_v: (fn_v[1]["migration_ts"], fn_v[0])
)
if len(migration_variants):
logger.info(
f"Applying migrations: {','.join(k for k, v in migration_variants)}"
)
for migrator_file, migration in migration_variants:
if "migration_ts" in migration:
del migration["migration_ts"]
if len(migration):
combined_spec = variant_add(combined_spec, migration)
return combined_spec
|
def migrate_combined_spec(combined_spec, forge_dir, config):
"""CFEP-9 variant migrations
Apply the list of migration configurations to the build (in the correct sequence).
This will be used to change the variant within the list of MetaData instances,
and return the migrated variants.
This has to happen before the final variant files are computed.
The method for application is determined by the variant algebra as defined by CFEP-9
"""
combined_spec = combined_spec.copy()
migrations_root = os.path.join(forge_dir, ".ci_support", "migrations", "*.yaml")
migrations = glob.glob(migrations_root)
from .variant_algebra import parse_variant, variant_add
migration_variants = [
(fn, parse_variant(open(fn, "r").read(), config=config))
for fn in migrations
]
migration_variants.sort(
key=lambda fn_v: (fn_v[1]["migration_ts"], fn_v[0])
)
if len(migration_variants):
logger.info(
f"Applying migrations: {','.join(k for k, v in migration_variants)}"
)
for migrator_file, migration in migration_variants:
if "migration_ts" in migration:
del migration["migration_ts"]
if len(migration):
combined_spec = variant_add(combined_spec, migration)
return combined_spec
|
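A sketch of just the ordering rule: migration files are applied sorted by (migration_ts, path). The file names and variant contents below are made up:

migration_variants = [
    ("migrations/zlib.yaml", {"migration_ts": 2.0, "zlib": ["1.3"]}),
    ("migrations/python.yaml", {"migration_ts": 1.0, "python": ["3.11"]}),
]
migration_variants.sort(key=lambda fn_v: (fn_v[1]["migration_ts"], fn_v[0]))
print([fn for fn, _ in migration_variants])  # python.yaml is applied before zlib.yaml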
29,623 |
def info_frame(frame):
co = frame.f_code
line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
return {
"filename": co.co_filename.replace(sys.exec_prefix, ""),
"name": co.co_name,
"line_number": frame.f_lineno,
"line": line,
}
|
def info_frame(frame):
co = frame.f_code
line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
return {
"filename": co.co_filename.replace(sys.exec_prefix, "..."),
"name": co.co_name,
"line_number": frame.f_lineno,
"line": line,
}
|
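A minimal usage sketch, assuming the info_frame shown above (and its sys/linecache imports) is in scope; it summarises the caller's current frame:

import sys

summary = info_frame(sys._getframe())
print(summary["name"], summary["line_number"])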
56,833 |
def update_redis_location_for_tests(settings_caches):
if not is_testing():
raise Exception("Attempt to update Redis settings outside of tests")
for name, config in settings_caches.items():
if not config.get("BACKEND", "").startswith("django_redis"):
continue
test_location = config.get("TEST_LOCATION")
if not test_location:
logging.warning(
"Unable to set Redis DB in '%(name)s' cache for tests. Using '%(location)s'.\n"
"\tTo configure a separate Redis DB for tests add a 'TEST_LOCATION' to the"
" '%(name)s' cache configuration.", {"name": name, "location": config["LOCATION"]}
)
logging.info("Using '%s' connection for Redis", test_location)
config["LOCATION"] = test_location
|
def update_redis_location_for_tests(settings_caches):
if not is_testing():
raise Exception("Attempt to update Redis settings outside of tests")
for name, config in settings_caches.items():
if not config.get("BACKEND", "").startswith("django_redis"):
continue
test_location = config.get("TEST_LOCATION")
if test_location:
config["LOCATION"] = test_location
else:
logging.warning(
"Unable to set Redis DB in '%(name)s' cache for tests. Using '%(location)s'.\n"
"\tTo configure a separate Redis DB for tests add a 'TEST_LOCATION' to the"
" '%(name)s' cache configuration.", {"name": name, "location": config["LOCATION"]}
)
|
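A sketch of the cache-settings shape this helper rewrites; the values are hypothetical and the is_testing() guard is omitted:

settings_caches = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://localhost:6379/0",
        "TEST_LOCATION": "redis://localhost:6379/1",
    },
}
for config in settings_caches.values():
    if config.get("BACKEND", "").startswith("django_redis") and config.get("TEST_LOCATION"):
        config["LOCATION"] = config["TEST_LOCATION"]
print(settings_caches["default"]["LOCATION"])  # redis://localhost:6379/1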
6,357 |
def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list:
"""
Load the requested theme and return a list containing all palette entries
needed to highlight the debugger UI, including syntax highlighting.
"""
inheritance_overrides = {}
if may_use_fancy_formats:
def add_setting(color, setting):
return f"{color}, {setting}"
else:
def add_setting(color, setting):
return color
def link(child: str, parent: str):
inheritance_overrides[child] = parent
# {{{ themes
if theme == "classic":
# {{{ classic theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"highlighted": ("dark blue", "yellow"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ source view
"source": ("yellow", "dark blue"),
"current source": ("dark blue", "dark green"),
"breakpoint source": (
add_setting("yellow", "bold"), "dark red"),
"line number": ("light gray", "dark blue"),
"breakpoint marker": (
add_setting("dark red", "bold"), "dark blue"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": ("light cyan", "dark blue"),
"command line prompt": (
add_setting("white", "bold"), "dark blue"),
"command line error": (
add_setting("light green", "bold"), "dark blue"),
"command line clear button": (
add_setting("white", "bold"), "dark blue"),
"command line focused button": ("dark blue", "dark cyan"),
# }}}
# {{{ Code syntax
"keyword": (add_setting("white", "bold"), "dark blue"),
"function": ("light cyan", "dark blue"),
"literal": (add_setting("light green", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("dark cyan", "dark blue"),
# }}}
}
# }}}
elif theme == "vim":
# {{{ vim theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"hotkey": (add_setting("black", "bold, underline"), "light gray"),
"highlighted": ("black", "yellow"),
# }}}
# {{{ general ui
"header": (add_setting("black", "bold"), "light gray"),
"group head": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"input": ("black", "dark cyan"),
"focused input": ("black", "light cyan"),
"warning": (add_setting("dark red", "bold"), "white"),
"header warning": (add_setting("dark red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("black", "white"),
"current source": ("black", "dark cyan"),
"breakpoint source": ("dark red", "light gray"),
"line number": ("dark gray", "white"),
"current line marker": ("dark red", "white"),
"breakpoint marker": ("dark red", "white"),
# }}}
# {{{ sidebar
"sidebar one": ("black", "dark cyan"),
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar one": ("black", "light cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"highlighted var label": ("dark blue", "yellow"),
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": (
add_setting("dark gray", "bold"), "white"),
# }}}
# {{{ Code syntax
"keyword2": ("dark magenta", "white"),
"namespace": ("dark magenta", "white"),
"literal": ("dark red", "white"),
"exception": ("dark red", "white"),
"comment": ("dark gray", "white"),
"function": ("dark blue", "white"),
"pseudo": ("dark gray", "white"),
"builtin": ("light blue", "white"),
# }}}
}
# }}}
elif theme == "dark vim":
# {{{ dark vim
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "dark gray"),
"focused selectable": (add_setting("white", "bold"), "light blue"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("dark blue", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
"warning": (add_setting("light red", "bold"), "black"),
"header warning": (add_setting("light red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("white", "black"),
"current source": (add_setting("white", "bold"), "dark gray"),
"line number": (add_setting("dark gray", "bold"), "black"),
"breakpoint marker": (add_setting("light red", "bold"), "black"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ sidebar
"sidebar two": ("yellow", "dark gray"),
"focused sidebar two": ("light cyan", "light blue"),
"sidebar three": ("light gray", "dark gray"),
"focused sidebar three": ("yellow", "light blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ shell
"command line output": (add_setting("yellow", "bold"), "black"),
# }}}
# {{{ Code syntax
"keyword": ("yellow", "black"),
"literal": ("light magenta", "black"),
"function": (add_setting("light cyan", "bold"), "black"),
"punctuation": ("yellow", "black"),
"comment": ("dark cyan", "black"),
"exception": ("light red", "black"),
"builtin": ("light green", "black"),
"pseudo": ("dark green", "black"),
# }}}
}
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"hotkey": (add_setting("black", "underline, italics"), "light gray"),
"highlighted": ("white", "dark cyan"),
# }}}
# {{{ general ui
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"search box": ("white", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"focused sidebar": ("black", "white"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
# }}}
# {{{ source view
"source": ("dark green", "black"),
"highlighted source": ("black", "dark green"),
"current source": ("black", "brown"),
"current focused source": ("black", "yellow"),
"focused source": ("white", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"current breakpoint source": ("black", "dark red"),
"line number": ("light gray", "black"),
"current line marker": ("dark red", "black"),
"breakpoint marker": ("dark red", "black"),
# }}}
# {{{ sidebar
# }}}
# {{{ variables view
"variables": ("white", "black"),
"var label": ("light blue", "black"),
"var value": ("white", "black"),
"variable separator": ("dark cyan", "light gray"),
"focused var label": ("white", "dark blue"),
"focused var value": ("white", "dark blue"),
"highlighted var label": ("black", "dark green"),
"highlighted var value": ("black", "dark green"),
"focused highlighted var label": ("black", "light green"),
"focused highlighted var value": ("black", "light green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark blue"),
# }}}
# {{{ stack
"stack": ("white", "black"),
"frame name": ("white", "black"),
"frame class": ("light blue", "black"),
"frame location": ("light cyan", "black"),
"current frame name": (add_setting("white", "bold"), "black"),
"current frame class": (add_setting("light blue", "bold"), "black"),
"current frame location": (add_setting("light cyan", "bold"), "black"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("white", "dark blue"),
"focused frame location": ("white", "dark blue"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
"focused current frame class": (
add_setting("white", "bold"), "dark blue"),
"focused current frame location": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ breakpoints view
"breakpoint": ("white", "black"),
"disabled breakpoint": ("dark gray", "black"),
"focused breakpoint": ("white", "dark blue"),
"focused disabled breakpoint": ("light gray", "dark blue"),
"current breakpoint": (add_setting("white", "bold"), "black"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "black"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark blue"),
"focused disabled current breakpoint": (
add_setting("light gray", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line edit": ("white", "black"),
"command line prompt": (add_setting("white", "bold"), "black"),
"command line output": ("white", "black"),
"command line input": ("white", "black"),
"command line error": (add_setting("light red", "bold"), "black"),
"focused command line output": ("white", "dark blue"),
"focused command line input": (
"white", "dark blue"),
"focused command line error": ("black", "light red"),
"command line clear button": (add_setting("white", "bold"), "black"),
"command line focused button": ("black", "light gray"),
# }}}
# {{{ Code syntax
"keyword": ("dark magenta", "black"),
"pseudo": ("light magenta", "black"),
"function": (add_setting("light blue", "bold"), "black"),
"builtin": ("dark gray", "black"),
"literal": ("dark cyan", "black"),
"string": ("dark red", "black"),
"doublestring": ("dark red", "black"),
"docstring": ("yellow", "black"),
"backtick": ("light green", "black"),
"punctuation": ("white", "black"),
"comment": ("white", "black"),
"exception": ("light green", "black"),
# }}}
}
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict = {
# {{{ base styles
"background": ("light green", "light gray"),
"selectable": ("light green", "white"),
"focused selectable": ("white", "dark blue"),
"highlighted": ("white", "dark cyan"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"warning": (add_setting("light red", "bold"), "white"),
"header warning": (add_setting("light red", "bold"), "light gray"),
"focused sidebar": ("dark red", "light gray"),
"group head": (add_setting("yellow", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("yellow", "white"),
"breakpoint source": ("light red", "light gray"),
"current source": ("light gray", "light blue"),
"line number": ("light blue", "white"),
"current line marker": (
add_setting("light blue", "bold"), "white"),
"breakpoint marker": (
add_setting("light red", "bold"), "white"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "white"),
"sidebar three": ("light cyan", "white"),
"focused sidebar three": ("light gray", "dark blue"),
# }}}
# {{{ variables view
"return label": ("white", "yellow"),
"focused return label": ("white", "yellow"),
# }}}
# {{{ stack
"current frame name": (
add_setting("light green", "bold"), "white"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line output": ("light green", "white"),
# }}}
# {{{ Code syntax
"namespace": ("dark red", "white"),
"exception": ("light red", "white"),
"keyword": ("brown", "white"),
"keyword2": ("dark magenta", "white"),
"function": ("dark green", "white"),
"literal": ("dark cyan", "white"),
"builtin": ("dark blue", "white"),
"comment": ("light cyan", "white"),
"pseudo": ("light cyan", "white"),
# }}}
}
# }}}
elif theme == "agr-256":
# {{{ agr-256
# Give the colors some comprehensible names
black = "h235"
blacker = "h233"
dark_cyan = "h24"
dark_gray = "h241"
dark_green = "h22"
dark_red = "h88"
dark_teal = "h23"
light_blue = "h111"
light_cyan = "h80"
light_gray = "h252"
light_green = "h113"
light_red = "h160"
medium_gray = "h246"
salmon = "h223"
orange = "h173"
white = "h255"
yellow = "h192"
link("focused breakpoint", "focused selectable")
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (yellow, dark_cyan),
"hotkey": (add_setting(black, "underline"), light_gray),
"highlighted": (white, dark_green),
# }}}
# {{{ general ui
"focused sidebar": (dark_cyan, light_gray),
"group head": (add_setting(dark_cyan, "bold"), light_gray),
"dialog title": (add_setting(light_gray, "bold"), black),
"warning": (add_setting(white, "bold"), dark_red),
"fixed value": (add_setting(white, "bold"), dark_gray),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ source view
"line number": (dark_gray, black),
"current line marker": (add_setting(yellow, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
"source": (white, black),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"current source": (add_setting(light_gray, "bold"), dark_teal),
# }}}
# {{{ sidebar
"sidebar two": (light_blue, blacker),
"focused sidebar two": (light_gray, dark_cyan),
"sidebar three": (medium_gray, blacker),
"focused sidebar three": (salmon, dark_cyan),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (
add_setting(light_gray, "bold"), dark_cyan),
# }}}
# {{{ stack
"current frame name": (yellow, blacker),
"focused current frame name": (
add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ shell
"command line prompt": (add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (light_red, black),
# }}}
# {{{ Code syntax
"comment": (medium_gray, black),
"exception": (orange, black),
"function": (yellow, black),
"keyword": (light_blue, black),
"literal": (orange, black),
"operator": (yellow, black),
"pseudo": (medium_gray, black),
"punctuation": (salmon, black),
"string": (light_green, black),
# }}}
}
# }}}
elif theme == "monokai":
# {{{ monokai
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "black"),
"focused selectable": ("white", "dark gray"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"input": ("white", "black"),
"button": (add_setting("white", "bold"), "black"),
"focused button": (add_setting("white", "bold"), "dark gray"),
"focused sidebar": ("dark blue", "light gray"),
"warning": (add_setting("white", "bold"), "dark red"),
"group head": (add_setting("black", "bold"), "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
# }}}
# {{{ source view
"current source": ("black", "dark cyan"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
"line number": ("dark gray", "black"),
"current line marker": (add_setting("dark cyan", "bold"), "black"),
"breakpoint marker": (add_setting("dark red", "bold"), "black"),
# }}}
# {{{ sidebar
"sidebar two": ("light cyan", "black"),
"focused sidebar two": ("light cyan", "dark gray"),
"sidebar three": ("light magenta", "black"),
"focused sidebar three": ("light magenta", "dark gray"),
# }}}
# {{{ variables view
"return label": ("light green", "black"),
"focused return label": ("light green", "dark gray"),
# }}}
# {{{ stack
"current frame name": ("light green", "black"),
"focused current frame name": ("light green", "dark gray"),
# }}}
# {{{ shell
"command line prompt": (add_setting("yellow", "bold"), "black"),
"command line output": ("light cyan", "black"),
"command line error": ("yellow", "black"),
"focused command line output": ("light cyan", "dark gray"),
"focused command line error": (
add_setting("yellow", "bold"), "dark gray"),
# }}}
# {{{ Code syntax
"literal": ("light magenta", "black"),
"builtin": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword2": ("light cyan", "black"),
"function": ("light green", "black"),
"class": (add_setting("light green", "underline"), "black"),
"keyword": ("light red", "black"),
"operator": ("light red", "black"),
"comment": ("dark gray", "black"),
"docstring": ("dark gray", "black"),
"argument": ("brown", "black"),
"pseudo": ("brown", "black"),
"string": ("yellow", "black"),
# }}}
}
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
# Give the colors some comprehensible names
black = "h236"
blacker = "h234"
dark_gray = "h240"
dark_green = "h28"
dark_red = "h124"
dark_teal = "h30"
dark_magenta = "h141"
light_blue = "h111"
light_cyan = "h51"
light_gray = "h252"
light_green = "h155"
light_red = "h160"
light_magenta = "h198"
medium_gray = "h243"
orange = "h208"
white = "h255"
yellow = "h228"
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (white, dark_gray),
"highlighted": (white, dark_green),
"hotkey": (add_setting(black, "underline"), light_gray),
# }}}
# {{{ general ui
"input": (white, black),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(white, "bold"), dark_gray),
"focused sidebar": (dark_teal, light_gray),
"warning": (add_setting(white, "bold"), dark_red),
"group head": (add_setting(black, "bold"), light_gray),
"dialog title": (add_setting(white, "bold"), blacker),
# }}}
# {{{ source view
"source": (white, black),
"current source": (add_setting(light_gray, "bold"), dark_teal),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"line number": (dark_gray, black),
"current line marker": (add_setting(light_cyan, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
# }}}
# {{{ sidebar
"sidebar two": (light_cyan, blacker),
"focused sidebar two": (light_cyan, dark_gray),
"sidebar three": (dark_magenta, blacker),
"focused sidebar three": (dark_magenta, dark_gray),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (light_green, dark_gray),
# }}}
# {{{ stack
"current frame name": (light_green, blacker),
"focused current frame name": (light_green, dark_gray),
# }}}
# {{{ shell
"command line prompt": (
add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (orange, black),
"focused command line output": (light_cyan, dark_gray),
"focused command line error": (
add_setting(orange, "bold"), dark_gray),
# }}}
# {{{ Code syntax
"literal": (dark_magenta, black),
"builtin": (light_cyan, black),
"exception": (light_cyan, black),
"keyword2": (light_cyan, black),
"function": (light_green, black),
"class": (add_setting(light_green, "underline"), black),
"keyword": (light_magenta, black),
"operator": (light_magenta, black),
"comment": (medium_gray, black),
"docstring": (medium_gray, black),
"argument": (orange, black),
"pseudo": (orange, black),
"string": (yellow, black),
# }}}
}
# }}}
elif theme == "mono":
# {{{ mono
palette_dict = {
"background": ("standout",),
"selectable": (),
"focused selectable": ("underline",),
"highlighted": ("bold",),
"hotkey": ("underline, standout",),
}
# }}}
else:
# {{{ custom
try:
# {{{ base styles
palette_dict = {
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"highlighted": ("white", "dark cyan"),
"source": ("white", "dark blue"),
}
# }}}
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
"link": link,
}
from os.path import expanduser, expandvars
fname = expanduser(expandvars(theme))
with open(fname) as inf:
exec(compile(inf.read(), fname, "exec"), symbols)
except FileNotFoundError:
ui_log.error("Unable to locate custom theme file {!r}"
.format(theme))
return None
except Exception:
ui_log.exception("Error when importing theme:")
return None
# }}}
# }}}
# Apply style inheritance
for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()):
get_style(palette_dict, style_name, inheritance_overrides)
palette_list = [
astuple(entry)
for entry in palette_dict.values()
if isinstance(entry, PaletteEntry)
]
return palette_list
|
def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list:
"""
Load the requested theme and return a list containing all palette entries
needed to highlight the debugger UI, including syntax highlighting.
"""
inheritance_overrides = {}
if may_use_fancy_formats:
def add_setting(color, setting):
return f"{color}, {setting}"
else:
def add_setting(color, setting):
return color
def link(child: str, parent: str):
inheritance_overrides[child] = parent
# {{{ themes
if theme == "classic":
# {{{ classic theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"highlighted": ("dark blue", "yellow"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ source view
"source": ("yellow", "dark blue"),
"current source": ("dark blue", "dark green"),
"breakpoint source": (
add_setting("yellow", "bold"), "dark red"),
"line number": ("light gray", "dark blue"),
"breakpoint marker": (
add_setting("dark red", "bold"), "dark blue"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": ("light cyan", "dark blue"),
"command line prompt": (
add_setting("white", "bold"), "dark blue"),
"command line error": (
add_setting("light green", "bold"), "dark blue"),
"command line clear button": (
add_setting("white", "bold"), "dark blue"),
"command line focused button": ("dark blue", "dark cyan"),
# }}}
# {{{ Code syntax
"keyword": (add_setting("white", "bold"), "dark blue"),
"function": ("light cyan", "dark blue"),
"literal": (add_setting("light green", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("dark cyan", "dark blue"),
# }}}
}
# }}}
elif theme == "vim":
# {{{ vim theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"hotkey": (add_setting("black", "bold, underline"), "light gray"),
"highlighted": ("black", "yellow"),
# }}}
# {{{ general ui
"header": (add_setting("black", "bold"), "light gray"),
"group head": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"input": ("black", "dark cyan"),
"focused input": ("black", "light cyan"),
"warning": (add_setting("dark red", "bold"), "white"),
"header warning": (add_setting("dark red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("black", "white"),
"current source": ("black", "dark cyan"),
"breakpoint source": ("dark red", "light gray"),
"line number": ("dark gray", "white"),
"current line marker": ("dark red", "white"),
"breakpoint marker": ("dark red", "white"),
# }}}
# {{{ sidebar
"sidebar one": ("black", "dark cyan"),
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar one": ("black", "light cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"highlighted var label": ("dark blue", "yellow"),
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": (
add_setting("dark gray", "bold"), "white"),
# }}}
# {{{ Code syntax
"keyword2": ("dark magenta", "white"),
"namespace": ("dark magenta", "white"),
"literal": ("dark red", "white"),
"exception": ("dark red", "white"),
"comment": ("dark gray", "white"),
"function": ("dark blue", "white"),
"pseudo": ("dark gray", "white"),
"builtin": ("light blue", "white"),
# }}}
}
# }}}
elif theme == "dark vim":
# {{{ dark vim
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "dark gray"),
"focused selectable": (add_setting("white", "bold"), "light blue"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("dark blue", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
"warning": (add_setting("light red", "bold"), "black"),
"header warning": (add_setting("light red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("white", "black"),
"current source": (add_setting("white", "bold"), "dark gray"),
"line number": (add_setting("dark gray", "bold"), "black"),
"breakpoint marker": (add_setting("light red", "bold"), "black"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ sidebar
"sidebar two": ("yellow", "dark gray"),
"focused sidebar two": ("light cyan", "light blue"),
"sidebar three": ("light gray", "dark gray"),
"focused sidebar three": ("yellow", "light blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ shell
"command line output": (add_setting("yellow", "bold"), "black"),
# }}}
# {{{ Code syntax
"keyword": ("yellow", "black"),
"literal": ("light magenta", "black"),
"function": (add_setting("light cyan", "bold"), "black"),
"punctuation": ("yellow", "black"),
"comment": ("dark cyan", "black"),
"exception": ("light red", "black"),
"builtin": ("light green", "black"),
"pseudo": ("dark green", "black"),
# }}}
}
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"hotkey": (add_setting("black", "underline, italics"), "light gray"),
"highlighted": ("white", "dark cyan"),
# }}}
# {{{ general ui
"input": ("black", "dark cyan"),
"warning": (add_setting("white", "bold"), "dark red"),
"search box": ("white", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"focused sidebar": ("black", "white"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
# }}}
# {{{ source view
"source": ("dark green", "black"),
"highlighted source": ("black", "dark green"),
"current source": ("black", "brown"),
"current focused source": ("black", "yellow"),
"focused source": ("white", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"current breakpoint source": ("black", "dark red"),
"line number": ("light gray", "black"),
"current line marker": ("dark red", "black"),
"breakpoint marker": ("dark red", "black"),
# }}}
# {{{ sidebar
# }}}
# {{{ variables view
"variables": ("white", "black"),
"var label": ("light blue", "black"),
"var value": ("white", "black"),
"variable separator": ("dark cyan", "light gray"),
"focused var label": ("white", "dark blue"),
"focused var value": ("white", "dark blue"),
"highlighted var label": ("black", "dark green"),
"highlighted var value": ("black", "dark green"),
"focused highlighted var label": ("black", "light green"),
"focused highlighted var value": ("black", "light green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark blue"),
# }}}
# {{{ stack
"stack": ("white", "black"),
"frame name": ("white", "black"),
"frame class": ("light blue", "black"),
"frame location": ("light cyan", "black"),
"current frame name": (add_setting("white", "bold"), "black"),
"current frame class": (add_setting("light blue", "bold"), "black"),
"current frame location": (add_setting("light cyan", "bold"), "black"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("white", "dark blue"),
"focused frame location": ("white", "dark blue"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
"focused current frame class": (
add_setting("white", "bold"), "dark blue"),
"focused current frame location": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ breakpoints view
"breakpoint": ("white", "black"),
"disabled breakpoint": ("dark gray", "black"),
"focused breakpoint": ("white", "dark blue"),
"focused disabled breakpoint": ("light gray", "dark blue"),
"current breakpoint": (add_setting("white", "bold"), "black"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "black"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark blue"),
"focused disabled current breakpoint": (
add_setting("light gray", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line edit": ("white", "black"),
"command line prompt": (add_setting("white", "bold"), "black"),
"command line output": ("white", "black"),
"command line input": ("white", "black"),
"command line error": (add_setting("light red", "bold"), "black"),
"focused command line output": ("white", "dark blue"),
"focused command line input": (
"white", "dark blue"),
"focused command line error": ("black", "light red"),
"command line clear button": (add_setting("white", "bold"), "black"),
"command line focused button": ("black", "light gray"),
# }}}
# {{{ Code syntax
"keyword": ("dark magenta", "black"),
"pseudo": ("light magenta", "black"),
"function": (add_setting("light blue", "bold"), "black"),
"builtin": ("dark gray", "black"),
"literal": ("dark cyan", "black"),
"string": ("dark red", "black"),
"doublestring": ("dark red", "black"),
"docstring": ("yellow", "black"),
"backtick": ("light green", "black"),
"punctuation": ("white", "black"),
"comment": ("white", "black"),
"exception": ("light green", "black"),
# }}}
}
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict = {
# {{{ base styles
"background": ("light green", "light gray"),
"selectable": ("light green", "white"),
"focused selectable": ("white", "dark blue"),
"highlighted": ("white", "dark cyan"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"warning": (add_setting("light red", "bold"), "white"),
"header warning": (add_setting("light red", "bold"), "light gray"),
"focused sidebar": ("dark red", "light gray"),
"group head": (add_setting("yellow", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("yellow", "white"),
"breakpoint source": ("light red", "light gray"),
"current source": ("light gray", "light blue"),
"line number": ("light blue", "white"),
"current line marker": (
add_setting("light blue", "bold"), "white"),
"breakpoint marker": (
add_setting("light red", "bold"), "white"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "white"),
"sidebar three": ("light cyan", "white"),
"focused sidebar three": ("light gray", "dark blue"),
# }}}
# {{{ variables view
"return label": ("white", "yellow"),
"focused return label": ("white", "yellow"),
# }}}
# {{{ stack
"current frame name": (
add_setting("light green", "bold"), "white"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line output": ("light green", "white"),
# }}}
# {{{ Code syntax
"namespace": ("dark red", "white"),
"exception": ("light red", "white"),
"keyword": ("brown", "white"),
"keyword2": ("dark magenta", "white"),
"function": ("dark green", "white"),
"literal": ("dark cyan", "white"),
"builtin": ("dark blue", "white"),
"comment": ("light cyan", "white"),
"pseudo": ("light cyan", "white"),
# }}}
}
# }}}
elif theme == "agr-256":
# {{{ agr-256
# Give the colors some comprehensible names
black = "h235"
blacker = "h233"
dark_cyan = "h24"
dark_gray = "h241"
dark_green = "h22"
dark_red = "h88"
dark_teal = "h23"
light_blue = "h111"
light_cyan = "h80"
light_gray = "h252"
light_green = "h113"
light_red = "h160"
medium_gray = "h246"
salmon = "h223"
orange = "h173"
white = "h255"
yellow = "h192"
link("focused breakpoint", "focused selectable")
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (yellow, dark_cyan),
"hotkey": (add_setting(black, "underline"), light_gray),
"highlighted": (white, dark_green),
# }}}
# {{{ general ui
"focused sidebar": (dark_cyan, light_gray),
"group head": (add_setting(dark_cyan, "bold"), light_gray),
"dialog title": (add_setting(light_gray, "bold"), black),
"warning": (add_setting(white, "bold"), dark_red),
"fixed value": (add_setting(white, "bold"), dark_gray),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ source view
"line number": (dark_gray, black),
"current line marker": (add_setting(yellow, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
"source": (white, black),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"current source": (add_setting(light_gray, "bold"), dark_teal),
# }}}
# {{{ sidebar
"sidebar two": (light_blue, blacker),
"focused sidebar two": (light_gray, dark_cyan),
"sidebar three": (medium_gray, blacker),
"focused sidebar three": (salmon, dark_cyan),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (
add_setting(light_gray, "bold"), dark_cyan),
# }}}
# {{{ stack
"current frame name": (yellow, blacker),
"focused current frame name": (
add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ shell
"command line prompt": (add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (light_red, black),
# }}}
# {{{ Code syntax
"comment": (medium_gray, black),
"exception": (orange, black),
"function": (yellow, black),
"keyword": (light_blue, black),
"literal": (orange, black),
"operator": (yellow, black),
"pseudo": (medium_gray, black),
"punctuation": (salmon, black),
"string": (light_green, black),
# }}}
}
# }}}
elif theme == "monokai":
# {{{ monokai
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "black"),
"focused selectable": ("white", "dark gray"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"input": ("white", "black"),
"button": (add_setting("white", "bold"), "black"),
"focused button": (add_setting("white", "bold"), "dark gray"),
"focused sidebar": ("dark blue", "light gray"),
"warning": (add_setting("white", "bold"), "dark red"),
"group head": (add_setting("black", "bold"), "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
# }}}
# {{{ source view
"current source": ("black", "dark cyan"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
"line number": ("dark gray", "black"),
"current line marker": (add_setting("dark cyan", "bold"), "black"),
"breakpoint marker": (add_setting("dark red", "bold"), "black"),
# }}}
# {{{ sidebar
"sidebar two": ("light cyan", "black"),
"focused sidebar two": ("light cyan", "dark gray"),
"sidebar three": ("light magenta", "black"),
"focused sidebar three": ("light magenta", "dark gray"),
# }}}
# {{{ variables view
"return label": ("light green", "black"),
"focused return label": ("light green", "dark gray"),
# }}}
# {{{ stack
"current frame name": ("light green", "black"),
"focused current frame name": ("light green", "dark gray"),
# }}}
# {{{ shell
"command line prompt": (add_setting("yellow", "bold"), "black"),
"command line output": ("light cyan", "black"),
"command line error": ("yellow", "black"),
"focused command line output": ("light cyan", "dark gray"),
"focused command line error": (
add_setting("yellow", "bold"), "dark gray"),
# }}}
# {{{ Code syntax
"literal": ("light magenta", "black"),
"builtin": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword2": ("light cyan", "black"),
"function": ("light green", "black"),
"class": (add_setting("light green", "underline"), "black"),
"keyword": ("light red", "black"),
"operator": ("light red", "black"),
"comment": ("dark gray", "black"),
"docstring": ("dark gray", "black"),
"argument": ("brown", "black"),
"pseudo": ("brown", "black"),
"string": ("yellow", "black"),
# }}}
}
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
# Give the colors some comprehensible names
black = "h236"
blacker = "h234"
dark_gray = "h240"
dark_green = "h28"
dark_red = "h124"
dark_teal = "h30"
dark_magenta = "h141"
light_blue = "h111"
light_cyan = "h51"
light_gray = "h252"
light_green = "h155"
light_red = "h160"
light_magenta = "h198"
medium_gray = "h243"
orange = "h208"
white = "h255"
yellow = "h228"
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (white, dark_gray),
"highlighted": (white, dark_green),
"hotkey": (add_setting(black, "underline"), light_gray),
# }}}
# {{{ general ui
"input": (white, black),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(white, "bold"), dark_gray),
"focused sidebar": (dark_teal, light_gray),
"warning": (add_setting(white, "bold"), dark_red),
"group head": (add_setting(black, "bold"), light_gray),
"dialog title": (add_setting(white, "bold"), blacker),
# }}}
# {{{ source view
"source": (white, black),
"current source": (add_setting(light_gray, "bold"), dark_teal),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"line number": (dark_gray, black),
"current line marker": (add_setting(light_cyan, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
# }}}
# {{{ sidebar
"sidebar two": (light_cyan, blacker),
"focused sidebar two": (light_cyan, dark_gray),
"sidebar three": (dark_magenta, blacker),
"focused sidebar three": (dark_magenta, dark_gray),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (light_green, dark_gray),
# }}}
# {{{ stack
"current frame name": (light_green, blacker),
"focused current frame name": (light_green, dark_gray),
# }}}
# {{{ shell
"command line prompt": (
add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (orange, black),
"focused command line output": (light_cyan, dark_gray),
"focused command line error": (
add_setting(orange, "bold"), dark_gray),
# }}}
# {{{ Code syntax
"literal": (dark_magenta, black),
"builtin": (light_cyan, black),
"exception": (light_cyan, black),
"keyword2": (light_cyan, black),
"function": (light_green, black),
"class": (add_setting(light_green, "underline"), black),
"keyword": (light_magenta, black),
"operator": (light_magenta, black),
"comment": (medium_gray, black),
"docstring": (medium_gray, black),
"argument": (orange, black),
"pseudo": (orange, black),
"string": (yellow, black),
# }}}
}
# }}}
elif theme == "mono":
# {{{ mono
palette_dict = {
"background": ("standout",),
"selectable": (),
"focused selectable": ("underline",),
"highlighted": ("bold",),
"hotkey": ("underline, standout",),
}
# }}}
else:
# {{{ custom
try:
# {{{ base styles
palette_dict = {
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"highlighted": ("white", "dark cyan"),
"source": ("white", "dark blue"),
}
# }}}
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
"link": link,
}
from os.path import expanduser, expandvars
fname = expanduser(expandvars(theme))
with open(fname) as inf:
exec(compile(inf.read(), fname, "exec"), symbols)
except FileNotFoundError:
ui_log.error("Unable to locate custom theme file {!r}"
.format(theme))
return None
except Exception:
ui_log.exception("Error when importing theme:")
return None
# }}}
# }}}
# Apply style inheritance
for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()):
get_style(palette_dict, style_name, inheritance_overrides)
palette_list = [
astuple(entry)
for entry in palette_dict.values()
if isinstance(entry, PaletteEntry)
]
return palette_list
|
34,462 |
def fix_yaml_representer() -> None:
"""Ensure that `OrderedDict`s are dumped so that the order of keys is respected."""
def _order_rep(dumper: yaml.Representer, _data: Dict[Any, Any]) -> Any:
return dumper.represent_mapping(
"tag:yaml.org,2002:map", _data.items(), flow_style=False
)
yaml.add_representer(OrderedDict, _order_rep)
|
def _enable_ordered_dict_yaml_dumping() -> None:
"""Ensure that `OrderedDict`s are dumped so that the order of keys is respected."""
def _order_rep(dumper: yaml.Representer, _data: Dict[Any, Any]) -> Any:
return dumper.represent_mapping(
"tag:yaml.org,2002:map", _data.items(), flow_style=False
)
yaml.add_representer(OrderedDict, _order_rep)
|
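A minimal sketch of how the helper above might be exercised, assuming PyYAML is installed; registering the representer (the same way the helper does) makes an OrderedDict dump as a plain mapping with its insertion order preserved.

from collections import OrderedDict
import yaml

# Register the same representer the helper above installs.
yaml.add_representer(
    OrderedDict,
    lambda dumper, data: dumper.represent_mapping(
        "tag:yaml.org,2002:map", data.items(), flow_style=False
    ),
)

data = OrderedDict([("zulu", 1), ("alpha", 2), ("mike", 3)])
print(yaml.dump(data))  # keys stay in insertion order: zulu, alpha, mike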
41,540 |
def dice_score(im1, im2, empty_score=np.nan):
"""Computes the Dice coefficient between im1 and im2.
Compute a soft Dice coefficient between im1 and im2, ie equals twice the sum of the two masks product, divided by
the sum of each mask sum.
If both images are empty, then it returns empty_score.
Args:
im1 (ndarray): First array.
im2 (ndarray): Second array.
        empty_score (float): Returned value if both input arrays are empty.
Returns:
float: Dice coefficient.
"""
im1 = np.asarray(im1)
im2 = np.asarray(im2)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return empty_score
intersection = (im1 * im2).sum()
return (2. * intersection) / im_sum
|
def dice_score(im1, im2, empty_score=np.nan):
"""Computes the Dice coefficient between im1 and im2.
Compute a soft Dice coefficient between im1 and im2, it equals twice the sum of the two masks product, divided by
the sum of each mask sum.
If both images are empty, then it returns empty_score.
Args:
im1 (ndarray): First array.
im2 (ndarray): Second array.
        empty_score (float): Returned value if both input arrays are empty.
Returns:
float: Dice coefficient.
"""
im1 = np.asarray(im1)
im2 = np.asarray(im2)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return empty_score
intersection = (im1 * im2).sum()
return (2. * intersection) / im_sum
|
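As a quick sanity check of the arithmetic in dice_score above (a sketch, not part of the original snippet), two small binary masks give a Dice coefficient of 2 * 2 / (3 + 3) ≈ 0.667.

import numpy as np

im1 = np.array([[1, 1, 0],
                [0, 1, 0]])
im2 = np.array([[1, 0, 0],
                [0, 1, 1]])

# Same arithmetic as dice_score above:
# intersection = (im1 * im2).sum() = 2, im1.sum() + im2.sum() = 6 -> 2 * 2 / 6 ≈ 0.667
print(2.0 * (im1 * im2).sum() / (im1.sum() + im2.sum()))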
30,401 |
def users_to_entry(title, response, nex_page_token=None):
context = []
for user_data in response:
username = user_data.get('name').get('givenName') if user_data.get('name') \
and 'givenName' in user_data.get('name') else None
display = user_data.get('name').get('fullName') if user_data.get('name') \
and 'fullName' in user_data.get('name') else None
context.append({
'Type': 'Google',
'ID': user_data.get('id'),
'UserName': username,
'Username': username, # adding to fit the new context standard
'DisplayName': display,
'Email': {'Address': user_data.get('primaryEmail')},
'Gmail': {'Address': user_data.get('primaryEmail')},
'Group': user_data.get('kind'),
'Groups': user_data.get('kind'), # adding to fit the new context standard
'CustomerId': user_data.get('customerId'),
'Domain': user_data.get('primaryEmail').split('@')[1],
'VisibleInDirectory': user_data.get('includeInGlobalAddressList'),
})
headers = ['Type', 'ID', 'Username',
'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit', 'Email', 'VisibleInDirectory',
'nextPageToken']
human_readable = tableToMarkdown(title, context, headers, removeNull=True)
if nex_page_token:
human_readable += "\nTo get further results, rerun the command with this page-token:\n" + nex_page_token
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': response,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context}
}
|
def users_to_entry(title, response, next_page_token=None):
context = []
for user_data in response:
username = user_data.get('name').get('givenName') if user_data.get('name') \
and 'givenName' in user_data.get('name') else None
display = user_data.get('name').get('fullName') if user_data.get('name') \
and 'fullName' in user_data.get('name') else None
context.append({
'Type': 'Google',
'ID': user_data.get('id'),
'UserName': username,
'Username': username, # adding to fit the new context standard
'DisplayName': display,
'Email': {'Address': user_data.get('primaryEmail')},
'Gmail': {'Address': user_data.get('primaryEmail')},
'Group': user_data.get('kind'),
'Groups': user_data.get('kind'), # adding to fit the new context standard
'CustomerId': user_data.get('customerId'),
'Domain': user_data.get('primaryEmail').split('@')[1],
'VisibleInDirectory': user_data.get('includeInGlobalAddressList'),
})
headers = ['Type', 'ID', 'Username',
'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit', 'Email', 'VisibleInDirectory',
'nextPageToken']
human_readable = tableToMarkdown(title, context, headers, removeNull=True)
if next_page_token:
        human_readable += "\nTo get further results, rerun the command with this page-token:\n" + next_page_token
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': response,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context}
}
|
47,380 |
def require_retrieval(test_case):
"""
    Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`~transformers.RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_tf_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case)
return test_case
|
def require_retrieval(test_case):
"""
    Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_tf_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case)
return test_case
|
2,861 |
def inplace_swap_row(X, m, n):
"""
To swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
inplace_swap_row_csc(X, m, n)
elif isinstance(X, sp.csr_matrix):
inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
|
def inplace_swap_row(X, m, n):
"""
Swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
inplace_swap_row_csc(X, m, n)
elif isinstance(X, sp.csr_matrix):
inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
|
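The dispatcher above appears to match scikit-learn's public sklearn.utils.sparsefuncs.inplace_swap_row helper; a short usage sketch, assuming scikit-learn and SciPy are installed:

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_swap_row

X = sp.csr_matrix(np.array([[0., 1., 0.],
                            [2., 0., 3.],
                            [0., 0., 4.]]))
inplace_swap_row(X, 0, 2)  # swaps rows 0 and 2 in place, nothing is returned
print(X.toarray())         # row order is now [row 2, row 1, row 0]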
58,046 |
def fetch_incidents(client, last_run, first_fetch_time):
"""
    This function will execute on each fetch interval (default is 1 minute).
Args:
client (Client): HelloWorld client
last_run (dateparser.time): The greatest incident created_time we fetched from last fetch
first_fetch_time (dateparser.time): If last_run is None then fetch all incidents since first_fetch_time
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
# Get the last fetch time, if exists
last_fetch = last_run.get('last_fetch')
# Handle first time fetch
if last_fetch is None:
last_fetch = dateparser.parse(first_fetch_time)
else:
last_fetch = dateparser.parse(last_fetch)
latest_created_time = dateparser.parse(last_fetch.strftime('%Y-%m-%d %H:%M:%S'))
search_date = last_fetch.strftime('%Y-%m-%d %H:%M:%S')
incidents = []
demisto.info(f'Fetching GLPI tickets since: {str(search_date)}')
items = client.list_incidents(search_date)
for item in items:
ticket_id = item.get('2')
ticket = client.get_ticket(ticket_id)
requester, assigned, watcher = get_ticket_users_helper(client, ticket_id)
ticket['requester_users'] = requester
ticket['assigned_users'] = assigned
ticket['watcher_users'] = watcher
grequester, gassigned, gwatcher = get_ticket_groups_helper(client, ticket_id)
ticket['requester_groups'] = grequester
ticket['assigned_groups'] = gassigned
ticket['watcher_groups'] = gwatcher
htmldetails = unescape(ticket['content'])
ticket['content'] = htmldetails
files = []
files_entries = get_ticket_docs_helper(client, ticket_id)
for file in files_entries:
files.append({
'path': file.get('FileID', ''),
'name': file.get('File', '')
})
incident_created_time = dateparser.parse(ticket['date'])
ticket['mirror_direction'] = MIRROR_DIRECTION.get(demisto.params().get('mirror_direction'))
ticket['mirror_instance'] = demisto.integrationInstance()
ticket['mirror_tags'] = [
demisto.params().get('comment_tag'),
demisto.params().get('file_tag'),
demisto.params().get('work_notes_tag')
]
        demisto.debug('incident occurred: ' + str(incident_created_time.strftime(DATE_FORMAT)))  # type: ignore[union-attr]
incident = {
'name': ticket['name'],
'occurred': incident_created_time.strftime(DATE_FORMAT), # type: ignore[union-attr]
'attachment': files,
'rawJSON': json.dumps(ticket)
}
incidents.append(incident)
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore[operator]
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time.strftime(DATE_FORMAT)} # type: ignore[union-attr]
return next_run, incidents
|
def fetch_incidents(client, last_run, first_fetch_time):
"""
    This function will execute on each fetch interval (default is 1 minute).
Args:
client (Client): HelloWorld client
last_run (dateparser.time): The greatest incident created_time we fetched from last fetch
first_fetch_time (dateparser.time): If last_run is None then fetch all incidents since first_fetch_time
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
# Get the last fetch time, if exists
last_fetch = last_run.get('last_fetch')
# Handle first time fetch
if last_fetch is None:
last_fetch = dateparser.parse(first_fetch_time)
else:
last_fetch = dateparser.parse(last_fetch)
latest_created_time = dateparser.parse(last_fetch.strftime('%Y-%m-%d %H:%M:%S'))
search_date = last_fetch.strftime('%Y-%m-%d %H:%M:%S')
incidents = []
demisto.info(f'Fetching GLPI tickets since: {str(search_date)}')
items = client.list_incidents(search_date)
for item in items:
ticket_id = item.get('2')
ticket = client.get_ticket(ticket_id)
ticket['requester_users'], ticket['assigned_users'], ticket['watcher_users'] = get_ticket_users_helper(client, ticket_id)
ticket['requester_groups'], ticket['assigned_groups'], ticket['watcher_groups'] = get_ticket_groups_helper(client, ticket_id)
ticket['content'] = unescape(ticket['content'])
files = []
files_entries = get_ticket_docs_helper(client, ticket_id)
for file in files_entries:
files.append({
'path': file.get('FileID', ''),
'name': file.get('File', '')
})
incident_created_time = dateparser.parse(ticket['date'])
ticket['mirror_direction'] = MIRROR_DIRECTION.get(demisto.params().get('mirror_direction'))
ticket['mirror_instance'] = demisto.integrationInstance()
ticket['mirror_tags'] = [
demisto.params().get('comment_tag'),
demisto.params().get('file_tag'),
demisto.params().get('work_notes_tag')
]
        demisto.debug('incident occurred: ' + str(incident_created_time.strftime(DATE_FORMAT)))  # type: ignore[union-attr]
incident = {
'name': ticket['name'],
'occurred': incident_created_time.strftime(DATE_FORMAT), # type: ignore[union-attr]
'attachment': files,
'rawJSON': json.dumps(ticket)
}
incidents.append(incident)
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore[operator]
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time.strftime(DATE_FORMAT)} # type: ignore[union-attr]
return next_run, incidents
|
41,950 |
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
# TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
# examples and tutorials.
"checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
"sphinx",
"sphinx_rtd_theme",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
"plotly>=4.0.0", # optuna/visualization.
"pandas",
"lightgbm",
"torch==1.7.1",
"torchvision==0.8.2",
"torchaudio==0.7.2",
"thop",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
"dask[dataframe]",
"dask-ml",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
"optax",
"dm-haiku",
"hydra-optuna-sweeper",
],
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
}
return requirements
|
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
# TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
# examples and tutorials.
"checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
"sphinx",
"sphinx_rtd_theme",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
"plotly>=4.0.0", # optuna/visualization.
"pandas",
"lightgbm",
"torch==1.7.1",
"torchvision==0.8.2",
"torchaudio==0.7.2",
"thop",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp>=1.0.0",
"dask[dataframe]",
"dask-ml",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
"optax",
"dm-haiku",
"hydra-optuna-sweeper",
],
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
}
return requirements
|
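For context, a dictionary like the one returned above is typically handed to setuptools so users can opt into extras; a hypothetical sketch, assuming get_extras_require() is defined in the same setup module (the package name is illustrative only):

from setuptools import setup, find_packages

setup(
    name="optuna",                         # illustrative only
    packages=find_packages(),
    extras_require=get_extras_require(),   # enables e.g. `pip install optuna[optional]`
)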
32,040 |
def create_list(args: dict, sg):
listName = args.get('list_name')
data = {"name": listName}
response = sg.client.marketing.lists.post(request_body=data)
if response.status_code == 201:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.NewList': body}
md = tableToMarkdown('New List has been created successfully ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
        return 'New List creation failed: ' + str(response.body)
|
def create_list(args: dict, sg):
listName = args.get('list_name')
data = {"name": listName}
response = sg.client.marketing.lists.post(request_body=data)
if response.status_code == 201:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.NewList': body}
md = tableToMarkdown('New List has been successfully created ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
        return 'New List creation failed: ' + str(response.body)
|
24,906 |
def run_pylint(argv: Optional[Sequence[str]] = None):
"""Run pylint
Arguments can be a list of strings normally supplied as arguments on the command line
"""
from pylint.lint import Run as PylintRun
try:
PylintRun(argv or sys.argv[1:])
except KeyboardInterrupt:
sys.exit(1)
|
def run_pylint(argv: Optional[Sequence[str]] = None):
"""Run pylint
argv can be a list of strings normally supplied as arguments on the command line
"""
from pylint.lint import Run as PylintRun
try:
PylintRun(argv or sys.argv[1:])
except KeyboardInterrupt:
sys.exit(1)
|
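A hypothetical invocation of the wrapper above; my_module.py is a placeholder file name and the disabled check is just an example:

# Lint a single file with one check disabled; exits via sys.exit(1) on Ctrl-C.
run_pylint(["--disable=missing-module-docstring", "my_module.py"])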
40,974 |
def _get_translation(object, LicenceTranslation):
translations = LicenceTranslation.objects.filter(master_id=object.pk)
try:
# Try default translation
return translations.get(language_code=settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
try:
# Try default language
return translations.get(language_code=settings.PARLER_DEFAULT_LANGUAGE_CODE)
except ObjectDoesNotExist:
# Maybe the object was translated only in a specific language?
# Hope there is a single translation
return translations.get()
|
def _get_translation(object, LicenceTranslation):
translations = LicenceTranslation.objects.filter(master_id=object.pk)
try:
# Try default translation
return translations.get(language_code=settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
try:
# Try default language
return translations.get(language_code=settings.PARLER_DEFAULT_LANGUAGE_CODE)
except ObjectDoesNotExist:
# Maybe the object was translated only in a specific language?
# Hope there is a single translation
return translations.first()
|
23,653 |
def get_sky_diffuse(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
model='isotropic',
model_perez='allsitescomposite1990'):
r"""
Determine in-plane sky diffuse irradiance component
using the specified sky diffuse irradiance model.
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
poa_sky_diffuse : numeric
Sky diffuse irradiance in the plane of array. [W/m2]
Raises
------
ValueError
If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra
is None.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
    'airmass' is not provided, it is calculated using the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
model = model.lower()
if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None):
raise ValueError(f'dni_extra is required for model {model}')
if model == 'isotropic':
sky = isotropic(surface_tilt, dhi)
elif model == 'klucher':
sky = klucher(surface_tilt, surface_azimuth, dhi, ghi,
solar_zenith, solar_azimuth)
elif model == 'haydavies':
sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'reindl':
sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'king':
sky = king(surface_tilt, dhi, ghi, solar_zenith)
elif model == 'perez':
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model=model_perez)
else:
raise ValueError(f'invalid model selection {model}')
return sky
|
def get_sky_diffuse(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
model='isotropic',
model_perez='allsitescomposite1990'):
r"""
Determine in-plane sky diffuse irradiance component
using the specified sky diffuse irradiance model.
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal. [degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
poa_sky_diffuse : numeric
Sky diffuse irradiance in the plane of array. [W/m2]
Raises
------
ValueError
If model is one of 'haydavies', 'reindl', or 'perez' and dni_extra
is None.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
    'airmass' is not provided, it is calculated using the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
model = model.lower()
if (model in {'haydavies', 'reindl', 'perez'}) and (dni_extra is None):
raise ValueError(f'dni_extra is required for model {model}')
if model == 'isotropic':
sky = isotropic(surface_tilt, dhi)
elif model == 'klucher':
sky = klucher(surface_tilt, surface_azimuth, dhi, ghi,
solar_zenith, solar_azimuth)
elif model == 'haydavies':
sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'reindl':
sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'king':
sky = king(surface_tilt, dhi, ghi, solar_zenith)
elif model == 'perez':
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model=model_perez)
else:
raise ValueError(f'invalid model selection {model}')
return sky
|
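A small usage sketch, assuming this is pvlib.irradiance.get_sky_diffuse (the signature matches); the isotropic model needs neither dni_extra nor airmass:

import pvlib

poa_sky_diffuse = pvlib.irradiance.get_sky_diffuse(
    surface_tilt=30, surface_azimuth=180,
    solar_zenith=45, solar_azimuth=180,
    dni=800, ghi=600, dhi=100,
    model='isotropic',
)
# isotropic model: dhi * (1 + cos(tilt)) / 2 = 100 * (1 + cos(30 deg)) / 2 ≈ 93.3 W/m2
print(poa_sky_diffuse)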
2,630 |
def fetch_california_housing(
*, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False
):
"""Load the California housing dataset (regression).
============== ==============
Samples total 20640
Dimensionality 8
Features real
Target real 0.15 - 5.
============== ==============
Read more in the :ref:`User Guide <california_housing_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
dataset : Class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (20640, 8)
Each row corresponding to the 8 feature values in order.
If ``as_frame`` is True, ``data`` is a pandas object.
target : numpy array of shape (20640,)
Each value corresponds to the average
house value in units of 100,000.
If ``as_frame`` is True, ``target`` is a pandas object.
feature_names : list of length 8
Array of ordered feature names used in the dataset.
DESCR : str
Description of the California housing dataset.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
Notes
-----
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, "cal_housing.pkz")
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info(
"Downloading Cal. housing from {} to {}".format(ARCHIVE.url, data_home)
)
archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
with tarfile.open(mode="r:gz", name=archive_path) as f:
cal_housing = np.loadtxt(
f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter=","
)
# Columns are not in the same order compared to the previous
# URL resource on lib.stat.cmu.edu
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
cal_housing = cal_housing[:, columns_index]
joblib.dump(cal_housing, filepath, compress=6)
remove(archive_path)
else:
cal_housing = joblib.load(filepath)
feature_names = [
"MedInc",
"HouseAge",
"AveRooms",
"AveBedrms",
"Population",
"AveOccup",
"Latitude",
"Longitude",
]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
descr = load_descr("california_housing.rst")
X = data
y = target
frame = None
target_names = [
"MedHouseVal",
]
if as_frame:
frame, X, y = _convert_data_dataframe(
"fetch_california_housing", data, target, feature_names, target_names
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=descr,
)
|
def fetch_california_housing(
*, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False
):
"""Load the California housing dataset (regression).
============== ==============
Samples total 20640
Dimensionality 8
Features real
Target real 0.15 - 5.
============== ==============
Read more in the :ref:`User Guide <california_housing_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (20640, 8)
Each row corresponding to the 8 feature values in order.
If ``as_frame`` is True, ``data`` is a pandas object.
target : numpy array of shape (20640,)
Each value corresponds to the average
house value in units of 100,000.
If ``as_frame`` is True, ``target`` is a pandas object.
feature_names : list of length 8
Array of ordered feature names used in the dataset.
DESCR : str
Description of the California housing dataset.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
Notes
-----
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, "cal_housing.pkz")
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info(
"Downloading Cal. housing from {} to {}".format(ARCHIVE.url, data_home)
)
archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
with tarfile.open(mode="r:gz", name=archive_path) as f:
cal_housing = np.loadtxt(
f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter=","
)
# Columns are not in the same order compared to the previous
# URL resource on lib.stat.cmu.edu
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
cal_housing = cal_housing[:, columns_index]
joblib.dump(cal_housing, filepath, compress=6)
remove(archive_path)
else:
cal_housing = joblib.load(filepath)
feature_names = [
"MedInc",
"HouseAge",
"AveRooms",
"AveBedrms",
"Population",
"AveOccup",
"Latitude",
"Longitude",
]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
descr = load_descr("california_housing.rst")
X = data
y = target
frame = None
target_names = [
"MedHouseVal",
]
if as_frame:
frame, X, y = _convert_data_dataframe(
"fetch_california_housing", data, target, feature_names, target_names
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=descr,
)
|
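A minimal usage sketch; the first call downloads the archive into data_home unless it is already cached:

from sklearn.datasets import fetch_california_housing

X, y = fetch_california_housing(return_X_y=True)
print(X.shape, y.shape)  # expected: (20640, 8) (20640,)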
42,965 |
def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list[int]): list of GBS sample energies in units of :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
31,683 |
def add_notes(incident_id, comment):
body = {'text': {'format': 'text', 'content': comment}}
CLIENT.post('/incidents/' + str(incident_id) + '/comments', body) # type: ignore
return 'The note was added successfully.'
|
def add_notes(incident_id, comment):
body = {'text': {'format': 'text', 'content': comment}}
CLIENT.post('/incidents/' + str(incident_id) + '/comments', body) # type: ignore
return f'The note was added successfully to incident {incident_id}'
|
31,272 |
def iterate_indicator_entry(indicator, entry):
indicator_type = entry["indicator_type"]
indicator_type = indicator_types.get(indicator_type, indicator_type)
sources = entry.get('sourceBrands', [])
sources = sources if sources else ['']
for source in sources:
dbot_score = Common.DBotScore(indicator=indicator, indicator_type=indicator_type,
integration_name=source, score=entry["score"]).to_context()
dbot_score = dbot_score.get(DbotScoreKey, dbot_score)
yield CommandResults(readable_output=tableToMarkdown('Indicator DBot Score', dbot_score)), dbot_score
|
def iterate_indicator_entry(indicator, entry):
indicator_type = entry["indicator_type"]
indicator_type = indicator_types.get(indicator_type, indicator_type)
sources = entry.get('sourceBrands', [])
sources = sources if sources else ['']
for source in sources:
dbot_score = Common.DBotScore(indicator=indicator, indicator_type=indicator_type,
integration_name=source, score=entry["score"]).to_context()
dbot_score = dbot_score.get(DbotScoreKey, dbot_score)
yield dbot_score, CommandResults(readable_output=tableToMarkdown('Indicator DBot Score', dbot_score))
|
6,072 |
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict[queueName] = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60. / 250. * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, basestring):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
|
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueConf = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60. / 250. * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, basestring):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
|
16,176 |
def _normalize_states(
hass: HomeAssistant,
entity_history: list[State],
device_class: str | None,
entity_id: str,
) -> tuple[str | None, list[tuple[float, State]]]:
"""Normalize units."""
unit = None
if device_class not in UNIT_CONVERSIONS:
# We're not normalizing this device class, return the state as they are
fstates = [
(float(el.state), el) for el in entity_history if _is_number(el.state)
]
if fstates:
all_units = _get_units(fstates)
if len(all_units) > 1:
if WARN_UNSTABLE_UNIT not in hass.data:
hass.data[WARN_UNSTABLE_UNIT] = set()
if entity_id not in hass.data[WARN_UNSTABLE_UNIT]:
hass.data[WARN_UNSTABLE_UNIT].add(entity_id)
extra = ""
if old_metadata := statistics.get_metadata(hass, entity_id):
extra = f" and matches the unit of already compiled statistics {old_metadata['unit_of_measurement']}"
_LOGGER.warning(
"The unit of %s is changing, got multiple%s, generation of long term "
"statistics will be suppressed unless the unit is stable%s",
entity_id,
all_units,
extra,
)
return None, []
unit = fstates[0][1].attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return unit, fstates
fstates = []
for state in entity_history:
# Exclude non numerical states from statistics
if not _is_number(state.state):
continue
fstate = float(state.state)
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
# Exclude unsupported units from statistics
if unit not in UNIT_CONVERSIONS[device_class]:
if WARN_UNSUPPORTED_UNIT not in hass.data:
hass.data[WARN_UNSUPPORTED_UNIT] = set()
if entity_id not in hass.data[WARN_UNSUPPORTED_UNIT]:
hass.data[WARN_UNSUPPORTED_UNIT].add(entity_id)
_LOGGER.warning("%s has unknown unit %s", entity_id, unit)
continue
fstates.append((UNIT_CONVERSIONS[device_class][unit](fstate), state))
return DEVICE_CLASS_UNITS[device_class], fstates
|
def _normalize_states(
hass: HomeAssistant,
entity_history: list[State],
device_class: str | None,
entity_id: str,
) -> tuple[str | None, list[tuple[float, State]]]:
"""Normalize units."""
unit = None
if device_class not in UNIT_CONVERSIONS:
# We're not normalizing this device class, return the state as they are
fstates = [
(float(el.state), el) for el in entity_history if _is_number(el.state)
]
if fstates:
all_units = _get_units(fstates)
if len(all_units) > 1:
if WARN_UNSTABLE_UNIT not in hass.data:
hass.data[WARN_UNSTABLE_UNIT] = set()
if entity_id not in hass.data[WARN_UNSTABLE_UNIT]:
hass.data[WARN_UNSTABLE_UNIT].add(entity_id)
extra = ""
if old_metadata := statistics.get_metadata(hass, entity_id):
extra = f"and matches the unit of already compiled statistics {old_metadata['unit_of_measurement']}"
_LOGGER.warning(
"The unit of %s is changing, got multiple%s, generation of long term "
"statistics will be suppressed unless the unit is stable%s",
entity_id,
all_units,
extra,
)
return None, []
unit = fstates[0][1].attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return unit, fstates
fstates = []
for state in entity_history:
# Exclude non numerical states from statistics
if not _is_number(state.state):
continue
fstate = float(state.state)
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
# Exclude unsupported units from statistics
if unit not in UNIT_CONVERSIONS[device_class]:
if WARN_UNSUPPORTED_UNIT not in hass.data:
hass.data[WARN_UNSUPPORTED_UNIT] = set()
if entity_id not in hass.data[WARN_UNSUPPORTED_UNIT]:
hass.data[WARN_UNSUPPORTED_UNIT].add(entity_id)
_LOGGER.warning("%s has unknown unit %s", entity_id, unit)
continue
fstates.append((UNIT_CONVERSIONS[device_class][unit](fstate), state))
return DEVICE_CLASS_UNITS[device_class], fstates
|
59,887 |
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
    This is a wrapper method that operates on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
        Decide whether to return nan as the new center for cells that are not
        star-shaped. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections of the spaces defined
by the cell faces and the face normals by using the method half_space_interior_point.
    This is a wrapper method that operates on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
        Whether to return NaN as the new center for cells that are not
        star-shaped. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
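A minimal usage sketch for the function above, assuming porepy is installed and that star_shape_cell_centers is importable or defined as shown; a Cartesian grid is used because its cells are convex and therefore trivially star-shaped.
import porepy as pp
g = pp.CartGrid([3, 3])                 # 2d structured grid; every cell is convex
g.compute_geometry()
centers = star_shape_cell_centers(g)    # defined as above
assert centers.shape == (3, g.num_cells)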
30,696 |
def get_owners_command(client: Client) -> COMMAND_OUTPUT:
""" Get availble indicators owners from ThreatConnect - Help configure ThreatConnect Feed integraiton.
Args:
client: ThreatConnect client.
Returns:
str: Human readable.
dict: Operation entry context.
dict: Operation raw response.
"""
raw_response: Iterator[Any] = client.get_owners()
readable_output: str = tableToMarkdown(name=f"{INTEGRATION_NAME} - Indicators",
t=list(raw_response))
return readable_output, {}, list(raw_response)
|
def get_owners_command(client: Client) -> COMMAND_OUTPUT:
""" Get availble indicators owners from ThreatConnect - Help configure ThreatConnect Feed integraiton.
Args:
client: ThreatConnect client.
Returns:
str: Human readable.
dict: Operation entry context.
dict: Operation raw response.
"""
raw_response: Iterator[Any] = client.get_owners()
readable_output: str = tableToMarkdown(name=f"{INTEGRATION_NAME} - Owners",
t=list(raw_response))
return readable_output, {}, list(raw_response)
|
23,169 |
def test_slicing_and_unknown_chunks():
a = da.ones((10, 5), chunks=5)
a._chunks = ((np.nan, np.nan), (5,))
with pytest.raises(ValueError, match="Array chunk size or shape is unknown"):
a[[0, 5]].compute()
|
def test_slicing_and_unknown_chunks():
a = da.ones((10, 5), chunks=5)
a = a[a > 20]
with pytest.raises(ValueError, match="Array chunk size or shape is unknown"):
a[[0, 5]].compute()
|
36,828 |
def copy_table(table_name, new_table_name, credentials=None, if_exists='fail', log_enabled=True):
"""
Copy a table into a new table in the CARTO account.
Args:
table_name (str): name of the original table.
        new_table_name (str): name for the new table.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
instance of Credentials (username, api_key, etc).
if_exists (str, optional): 'fail', 'replace', 'append'. Default is 'fail'.
"""
if not isinstance(table_name, str):
raise ValueError('Wrong table name. You should provide a valid table name.')
if not isinstance(new_table_name, str):
raise ValueError('Wrong new table name. You should provide a valid table name.')
if if_exists not in IF_EXISTS_OPTIONS:
raise ValueError('Wrong option. You should provide: {}.'.format(', '.join(IF_EXISTS_OPTIONS)))
context_manager = ContextManager(credentials)
query = 'SELECT * FROM {}'.format(table_name)
context_manager.create_table_from_query(new_table_name, query, if_exists)
if log_enabled:
print('Success! Table copied correctly')
|
def copy_table(table_name, new_table_name, credentials=None, if_exists='fail', log_enabled=True):
"""
Copy a table into a new table in the CARTO account.
Args:
table_name (str): name of the original table.
        new_table_name (str): name for the new table.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
instance of Credentials (username, api_key, etc).
if_exists (str, optional): 'fail', 'replace', 'append'. Default is 'fail'.
"""
if not isinstance(table_name, str):
raise ValueError('Wrong table name. You should provide a valid table name.')
if not isinstance(new_table_name, str):
raise ValueError('Wrong new table name. You should provide a valid table name.')
if if_exists not in IF_EXISTS_OPTIONS:
raise ValueError('Wrong option for the `if_exists` param. You should provide: {}.'.format(', '.join(IF_EXISTS_OPTIONS)))
context_manager = ContextManager(credentials)
query = 'SELECT * FROM {}'.format(table_name)
context_manager.create_table_from_query(new_table_name, query, if_exists)
if log_enabled:
print('Success! Table copied correctly')
|
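A short usage sketch for copy_table, assuming cartoframes is installed; the username, API key and table names are placeholders.
from cartoframes.auth import Credentials
creds = Credentials('my_user', 'my_api_key')   # placeholder credentials
copy_table('my_table', 'my_table_backup', credentials=creds, if_exists='replace')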
32,216 |
def unfold(s):
r"""
Remove folding whitespace from a string by converting line breaks (and any
whitespace adjacent to line breaks) to a single space and removing leading
& trailing whitespace.
From: https://github.com/jwodder/headerparser/blob/master/headerparser/types.py#L39
unfold('This is a \n folded string.\n')
'This is a folded string.'
:param string s: a string to unfold
:rtype: string
"""
return re.sub(r'[ \t]*[\r\n][ \t\r\n]*', ' ', s).strip(' ') if s else s
|
def unfold(s):
r"""
Remove folding whitespace from a string by converting line breaks (and any
whitespace adjacent to line breaks) to a single space and removing leading
& trailing whitespace.
From: https://github.com/jwodder/headerparser/blob/master/headerparser/types.py#L39
unfold('This is a \n folded string.\n')
'This is a folded string.'
:param string s: a string to unfold
:rtype: string
"""
return re.sub(r'[ \t]*[\r\n][ \t\r\n]*', ' ', s).strip(' ') if s else ''
|
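A small self-contained check of the behavioural difference between the two versions above: both unfold folded strings the same way, but the modified version returns '' for falsy input where the original returned the input unchanged (so unfold(None) was None).
import re
def unfold(s):
    return re.sub(r'[ \t]*[\r\n][ \t\r\n]*', ' ', s).strip(' ') if s else ''
assert unfold('This is a \n folded string.\n') == 'This is a folded string.'
assert unfold('') == ''
assert unfold(None) == ''   # the original version would have returned None here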
25,992 |
def update_key_rotation_policy(cmd, client, value, name=None):
from azure.cli.core.util import read_file_content, get_json_object
if os.path.exists(value):
value = read_file_content(value)
policy = get_json_object(value)
if not policy:
raise InvalidArgumentValueError("Please specify a valid policy")
KeyRotationLifetimeAction = cmd.loader.get_sdk('KeyRotationLifetimeAction', mod='_models',
resource_type=ResourceType.DATA_KEYVAULT_KEYS)
lifetime_actions = []
if policy.get('lifetime_actions', None):
for action in policy['lifetime_actions']:
try:
action_type = action['action'].get('type', None) if action.get('action', None) else None
except AttributeError:
action_type = action.get('action', None)
time_after_create = action['trigger'].get('time_after_create', None) \
if action.get('trigger', None) else action.get('time_after_create', None)
time_before_expiry = action['trigger'].get('time_before_expiry', None) \
if action.get('trigger', None) else action.get('time_before_expiry', None)
lifetime_action = KeyRotationLifetimeAction(action_type,
time_after_create=time_after_create,
time_before_expiry=time_before_expiry)
lifetime_actions.append(lifetime_action)
expires_in = policy.get('expires_in', None)
if policy.get('attributes', None):
expires_in = policy['attributes'].get('expires_in')\
if policy['attributes'].get('expires_in', None)\
else policy['attributes'].get('expiry_time', None)
return client.update_key_rotation_policy(name=name, lifetime_actions=lifetime_actions, expires_in=expires_in)
# endregion
|
def update_key_rotation_policy(cmd, client, value, name=None):
from azure.cli.core.util import read_file_content, get_json_object
if os.path.exists(value):
value = read_file_content(value)
policy = get_json_object(value)
if not policy:
raise InvalidArgumentValueError("Please specify a valid policy")
KeyRotationLifetimeAction = cmd.loader.get_sdk('KeyRotationLifetimeAction', mod='_models',
resource_type=ResourceType.DATA_KEYVAULT_KEYS)
lifetime_actions = []
if policy.get('lifetime_actions', None):
for action in policy['lifetime_actions']:
try:
action_type = action['action'].get('type', None) if action.get('action', None) else None
except AttributeError:
action_type = action.get('action', None)
time_after_create = action['trigger'].get('time_after_create', None) \
if action.get('trigger', None) else action.get('time_after_create', None)
time_before_expiry = action['trigger'].get('time_before_expiry', None) \
if action.get('trigger', None) else action.get('time_before_expiry', None)
lifetime_action = KeyRotationLifetimeAction(action_type,
time_after_create=time_after_create,
time_before_expiry=time_before_expiry)
lifetime_actions.append(lifetime_action)
expires_in = policy.get('expires_in', None) or policy.get('expiry_time', None)
if policy.get('attributes', None):
expires_in = policy['attributes'].get('expires_in')\
if policy['attributes'].get('expires_in', None)\
else policy['attributes'].get('expiry_time', None)
return client.update_key_rotation_policy(name=name, lifetime_actions=lifetime_actions, expires_in=expires_in)
# endregion
|
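A hypothetical example of a policy file that the parser above accepts: it reads either nested 'action'/'trigger' dictionaries or flattened keys, plus an optional 'attributes' block with 'expires_in' or 'expiry_time'. The action types and ISO 8601 durations below are illustrative assumptions.
import json
policy = {
    "lifetime_actions": [
        {"action": {"type": "Rotate"}, "trigger": {"time_after_create": "P90D"}},
        {"action": {"type": "Notify"}, "trigger": {"time_before_expiry": "P30D"}},
    ],
    "attributes": {"expiry_time": "P2Y"},
}
with open("rotation-policy.json", "w") as f:
    json.dump(policy, f, indent=2)
# update_key_rotation_policy(cmd, client, "rotation-policy.json", name="my-key") would then
# read this file, build KeyRotationLifetimeAction objects and call the client.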
8,245 |
def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinate and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
intensity : `~astropy.units.Quantity`
loop_coord : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
        raise ValueError('At least one coordinate is not within the bounds of the map. '
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
|
def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinate and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
intensity : `~astropy.units.Quantity`
value_coords : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
        raise ValueError('At least one coordinate is not within the bounds of the map. '
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
|
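A hedged usage sketch for extract_along_coord, assuming sunpy (with sample data) and astropy are installed and that the helpers it relies on (contains_coordinate, _bresenham) are available alongside it; the line endpoints are arbitrary points inside the sample AIA field of view.
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE   # sample data, downloaded on first use
smap = sunpy.map.Map(AIA_171_IMAGE)
line = SkyCoord([-500, 500] * u.arcsec, [-300, 300] * u.arcsec,
                frame=smap.coordinate_frame)
intensity, line_coords = extract_along_coord(smap, line)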
59,249 |
def main():
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command='])
if not args:
print(_usage)
sys.exit(2)
commands = []
run_as_module = False
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-c', '--command']:
commands.append(optarg)
elif opt in ['-m']:
run_as_module = True
mainpyfile = args[0] # Get script filename
if not run_as_module and not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
if not run_as_module:
mainpyfile = os.path.realpath(mainpyfile)
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
pdb.rcLines.extend(commands)
while True:
try:
if run_as_module:
pdb._runmodule(mainpyfile)
else:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(sys.argv[1:]))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status:", end=' ')
print(sys.exc_info()[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
if pdb._user_requested_quit:
break
else:
print("Post mortem debugger finished. The " + mainpyfile +
" will be restarted")
|
def main():
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command='])
if not args:
print(_usage)
sys.exit(2)
commands = []
run_as_module = False
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-c', '--command']:
commands.append(optarg)
elif opt in ['-m']:
run_as_module = True
mainpyfile = args[0] # Get script filename
if not run_as_module and not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
if not run_as_module:
mainpyfile = os.path.realpath(mainpyfile)
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
pdb.rcLines.extend(commands)
while True:
try:
if run_as_module:
pdb._runmodule(mainpyfile)
else:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(sys.argv[1:]))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status:", end=' ')
print(sys.exc_info()[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
if pdb._user_requested_quit:
break
print("Post mortem debugger finished. The " + mainpyfile +
" will be restarted")
|
25,800 |
def provision_from_file(file_path):
"""
Expects a JSON file with the following format (example values supplied):
{
"facility": "My Facility",
"preset": "formal",
"facility_settings": {
"learner_can_edit_username": true,
"learner_can_edit_name": true,
"learner_can_edit_password": true,
"learner_can_sign_up": true,
"learner_can_delete_account": true,
"learner_can_login_with_no_password": true,
"show_download_button_in_learn": true
},
"device_settings": {
"language_id": "en",
"landing_page": "homepage",
"allow_guest_access": true,
"allow_peer_unlisted_channel_import": true,
"allow_learner_unassigned_resource_access": true,
"name": "My Device",
"allow_other_browsers_to_connect": true
},
"username": "superuser",
"password": "password"
}
All fields are optional.
"""
from kolibri.core.auth.models import Facility
if device_provisioned() or not os.path.exists(file_path):
return
try:
with open(file_path, "r") as f:
logger.info(
"Attempting to automatically provision device from data in {}".format(
file_path
)
)
options = json.load(f)
except (IOError, ValueError) as e:
logging.error("Failed to load {}:\n{}".format(file_path, e))
return
facility_name = options.get("facility")
facility = None
if facility_name:
facility_query = Facility.objects.filter(name__iexact=facility_name)
if facility_query.exists():
facility = facility_query.get()
logger.warn(
"Facility with name '{name}' already exists, not modifying preset.".format(
name=facility.name
)
)
else:
facility = Facility.get_default_facility() or Facility.objects.first()
try:
device_settings = validate_device_settings(**options.get("device_settings", {}))
except ValueError:
logging.error("Invalid device settings specified in {}.".format(file_path))
return
try:
facility_settings = validate_facility_settings(
options.get("facility_settings", {})
)
except ValueError:
logging.error("Invalid facility settings specified in {}.".format(file_path))
return
preset = options.get("preset")
username = options.get("username")
password = options.get("password")
setup_device_and_facility(
facility,
facility_name,
preset,
facility_settings,
device_settings,
username,
password,
)
|
def provision_from_file(file_path):
"""
Expects a JSON file with the following format (example values supplied):
{
"facility_name": "My Facility",
"preset": "formal",
"facility_settings": {
"learner_can_edit_username": true,
"learner_can_edit_name": true,
"learner_can_edit_password": true,
"learner_can_sign_up": true,
"learner_can_delete_account": true,
"learner_can_login_with_no_password": true,
"show_download_button_in_learn": true
},
"device_settings": {
"language_id": "en",
"landing_page": "homepage",
"allow_guest_access": true,
"allow_peer_unlisted_channel_import": true,
"allow_learner_unassigned_resource_access": true,
"name": "My Device",
"allow_other_browsers_to_connect": true
},
"username": "superuser",
"password": "password"
}
All fields are optional.
"""
from kolibri.core.auth.models import Facility
if device_provisioned() or not os.path.exists(file_path):
return
try:
with open(file_path, "r") as f:
logger.info(
"Attempting to automatically provision device from data in {}".format(
file_path
)
)
options = json.load(f)
except (IOError, ValueError) as e:
logging.error("Failed to load {}:\n{}".format(file_path, e))
return
facility_name = options.get("facility")
facility = None
if facility_name:
facility_query = Facility.objects.filter(name__iexact=facility_name)
if facility_query.exists():
facility = facility_query.get()
logger.warn(
"Facility with name '{name}' already exists, not modifying preset.".format(
name=facility.name
)
)
else:
facility = Facility.get_default_facility() or Facility.objects.first()
try:
device_settings = validate_device_settings(**options.get("device_settings", {}))
except ValueError:
logging.error("Invalid device settings specified in {}.".format(file_path))
return
try:
facility_settings = validate_facility_settings(
options.get("facility_settings", {})
)
except ValueError:
logging.error("Invalid facility settings specified in {}.".format(file_path))
return
preset = options.get("preset")
username = options.get("username")
password = options.get("password")
setup_device_and_facility(
facility,
facility_name,
preset,
facility_settings,
device_settings,
username,
password,
)
|
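A minimal sketch of writing a provisioning file in the shape the code above reads (note it looks up the "facility" key via options.get("facility")); the setting values are illustrative.
import json, tempfile
options = {
    "facility": "My Facility",
    "preset": "formal",
    "facility_settings": {"learner_can_sign_up": True},
    "device_settings": {"language_id": "en", "allow_guest_access": True},
    "username": "superuser",
    "password": "password",
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(options, f)
# provision_from_file(f.name) would then provision the device, provided it is not
# already provisioned and the settings pass validation.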
8,463 |
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
Expand LTOC to include all the closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
(name, path, typ) = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
lTOC.append((lib, npth, 'BINARY'))
return lTOC
|
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
Expand LTOC to include all the closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
(name, path, typ) = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npath)
lTOC.append((lib, npth, 'BINARY'))
return lTOC
|
31,317 |
def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
|
def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values.get("values", []):
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
|
45,684 |
def Clustergram(
data=None,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
column_labels=None,
hide_labels=None,
standardize='none',
cluster='all',
row_dist='euclidean',
col_dist='euclidean',
dist_fun=scs.distance.pdist,
link_fun=lambda x, **kwargs: sch.linkage(x, 'complete', **kwargs),
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
symmetric_value=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color='rgba(0,0,0,0)',
plot_bg_color='rgba(0,0,0,0)',
height=500,
width=500
):
"""Function that returns a Dash Bio Clustergram object.
Keyword arguments:
- data (ndarray; required): Matrix of observations as array of arrays
- generate_curves_dict (bool; default false): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
if one wishes to capture the cluster number that is clicked.)
- return_computed_traces (bool; default false): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels)
- column_labels (list; optional): List of column category labels
(observation labels)
- hide_labels (list; optional): List of labels not to display on the
final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data to be
clustered along columns, then clustered along rows of
row-clustered data.
- row_dist (string; default 'euclidean'): String specifying the
distance metric for rows. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- col_dist (string; default 'euclidean'): String specifying the
distance metric for columns. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see
scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default false): Enabling/disabling of the
option to determine leaf order that maximizes similarity between
neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): The colorscale for the
heatmap. Each list element contains two elements; the first
element refers to the portion of the maximum data point under
which a cell will be colored, and the second element refers to the
color. e.g., a colorscale [[0.0, 'white'], [0.5, 'gray'], [1.0,
'black']] mean that for all cells with a value in the 50th or
lower percentile of the dataset, the color on the heatmap would be
white; all cells with a value in the 50th or higher percentile,
excluding the 100th percentile, would be gray; and the cell(s) in
the 100th percentile would be colored black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension. The keys are: 'row' (for row clusters),
'col' (for column clusters), and 'bg' (for all traces above the
        clustering threshold for both row and column).
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- symmetric_value (bool; default true): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default false): Whether or not to transforms
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0). 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
        that it should happen along rows (see:
        https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
    - row_group_marker (list; optional): A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
    - paper_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if hide_labels is None:
hide_labels = []
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop('return_computed_traces')
kwargs.pop('computed_traces')
kwargs.pop('generate_curves_dict')
(fig, ct, curves_dict) = _Clustergram(
**kwargs
).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
def Clustergram(
data=None,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
column_labels=None,
hide_labels=None,
standardize='none',
cluster='all',
row_dist='euclidean',
col_dist='euclidean',
dist_fun=scs.distance.pdist,
link_fun=lambda x, **kwargs: sch.linkage(x, 'complete', **kwargs),
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
symmetric_value=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color='rgba(0,0,0,0)',
plot_bg_color='rgba(0,0,0,0)',
height=500,
width=500
):
"""Function that returns a Dash Bio Clustergram object.
Keyword arguments:
- data (ndarray; required): Matrix of observations as array of arrays
- generate_curves_dict (bool; default false): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
if one wishes to capture the cluster number that is clicked.)
- return_computed_traces (bool; default false): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels)
- column_labels (list; optional): List of column category labels
(observation labels)
- hide_labels (list; optional): List of labels not to display on the
final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data to be
clustered along columns, then clustered along rows of
row-clustered data.
- row_dist (string; default 'euclidean'): String specifying the
distance metric for rows. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- col_dist (string; default 'euclidean'): String specifying the
distance metric for columns. It will be passed as the argument
'metric' into the function specified in dist_fun (see
scipy.spatial.distance.pdist).
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see
scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default false): Enabling/disabling of the
option to determine leaf order that maximizes similarity between
neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): The colorscale for the
heatmap. Each list element contains two elements; the first
element refers to the portion of the maximum data point under
which a cell will be colored, and the second element refers to the
color. e.g., a colorscale [[0.0, 'white'], [0.5, 'gray'], [1.0,
'black']] means that for all cells with a value in the 50th or
lower percentile of the dataset, the color on the heatmap would be
white; all cells with a value in the 50th or higher percentile,
excluding the 100th percentile, would be gray; and the cell(s) in
the 100th percentile would be colored black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension. The keys are: 'row' (for row clusters),
'col' (for column clusters), and 'bg' (for all traces above the
        clustering threshold for both row and column).
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- symmetric_value (bool; default true): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default false): Whether or not to transforms
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0). 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
        that it should happen along rows (see:
        https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
    - row_group_marker (list; optional): A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objs documentation (see:
https://plot.ly/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
    - paper_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if hide_labels is None:
hide_labels = []
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop('return_computed_traces')
kwargs.pop('computed_traces')
kwargs.pop('generate_curves_dict')
(fig, ct, curves_dict) = _Clustergram(
**kwargs
).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
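A minimal usage sketch for the Clustergram wrapper above, assuming dash_bio, plotly, numpy and pandas are installed; the data frame is random and purely illustrative.
import numpy as np
import pandas as pd
import dash_bio
df = pd.DataFrame(np.random.rand(8, 6),
                  index=[f"gene_{i}" for i in range(8)],
                  columns=[f"sample_{j}" for j in range(6)])
fig = dash_bio.Clustergram(
    data=df.values,
    row_labels=list(df.index),
    column_labels=list(df.columns),
    height=600,
    width=700,
)
fig.show()   # a plotly Figure is returned by default, as in the code above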
6,957 |
def get_app_details_from_stack(skip_frames=0, ignore_files=()):
""" get name of app, filename and calling function from stack.
args:
skip_frames - number of stack frames to skip
ignore_files - file names to ignore while checking stack
returns: [optional] dictionary with results.
"""
import inspect
from frappe.utils import get_bench_path
try:
# skip one frame extra to remove frame representing *this* function call
callstack = inspect.stack()[skip_frames+1:]
# find first file that is not ignored.
for frame in callstack:
# skip ignored files
if any(file in frame.filename for file in ignore_files):
continue
break
if ".py" not in frame.filename:
return
filepath = frame.filename.replace(get_bench_path() + "/apps/", "").split("/")
return frappe._dict(app=filepath[0], filename=filepath[-1], function=frame.function)
except Exception:
pass
|
def get_app_details_from_stack(skip_frames=1, ignore_files=()):
""" get name of app, filename and calling function from stack.
args:
skip_frames - number of stack frames to skip
ignore_files - file names to ignore while checking stack
returns: [optional] dictionary with results.
"""
import inspect
from frappe.utils import get_bench_path
try:
# skip one frame extra to remove frame representing *this* function call
callstack = inspect.stack()[skip_frames+1:]
# find first file that is not ignored.
for frame in callstack:
# skip ignored files
if any(file in frame.filename for file in ignore_files):
continue
break
if ".py" not in frame.filename:
return
filepath = frame.filename.replace(get_bench_path() + "/apps/", "").split("/")
return frappe._dict(app=filepath[0], filename=filepath[-1], function=frame.function)
except Exception:
pass
|
53,283 |
def thermal_speed_coefficients(method: str, ndim: int) -> float:
r"""
Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}`
based on the given ``method`` and ``ndim``. (See the
`~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes <thermal-speed-notes>`
section for further details.)
Parameters
----------
method : `str`
Method to be used for calculating the thermal speed. Valid values are
``"most_probable"``, ``"rms"``, ``"mean_magnitude"``, and ``"nrl"``.
ndim : `int`
Dimensionality (1D, 2D, 3D) of space in which to calculate thermal
speed. Valid values are ``1``, ``2``, or ``3``.
Raises
------
`ValueError`
If ``method`` or ``ndim`` are not a valid value.
Notes
-----
    For a detailed explanation of the different coefficients used to calculate
    the thermal speed, see the :ref:`Notes <thermal-speed-notes>` section
    for `~plasmapy.formulary.parameters.thermal_speed`. The possible return
    values are listed in the table below.
    .. table:: Thermal speed :math:`v_{th}` coefficients.
       :widths: 2 1 1 1
       :width: 100%
       +----------------------+--------------------+--------------------+--------------------+
       | ↓ **method**         | ``ndim = 1``       | ``ndim = 2``       | ``ndim = 3``       |
       +======================+====================+====================+====================+
       | ``"most_probable"``  | :math:`0`          | :math:`1`          | :math:`\sqrt{2}`   |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"rms"``            | :math:`1`          | :math:`\sqrt{2}`   | :math:`\sqrt{3}`   |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"mean_magnitude"`` | :math:`\sqrt{2/π}` | :math:`\sqrt{π/2}` | :math:`\sqrt{8/π}` |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"nrl"``            | :math:`1`          | :math:`1`          | :math:`1`          |
       +----------------------+--------------------+--------------------+--------------------+
Examples
--------
>>> thermal_speed_coefficients(method="most_probable", ndim=3)
1.414213...
"""
_coefficients = {
(1, "most_probable"): 0,
(2, "most_probable"): 1,
(3, "most_probable"): np.sqrt(2),
(1, "rms"): 1,
(2, "rms"): np.sqrt(2),
(3, "rms"): np.sqrt(3),
(1, "mean_magnitude"): np.sqrt(2 / np.pi),
(2, "mean_magnitude"): np.sqrt(np.pi / 2),
(3, "mean_magnitude"): np.sqrt(8 / np.pi),
(1, "nrl"): 1,
(2, "nrl"): 1,
(3, "nrl"): 1,
}
try:
coeff = _coefficients[(ndim, method)]
except KeyError:
raise ValueError(
f"Value for (ndim, method) pair not valid, got '({ndim}, {method})'."
)
return coeff
|
def thermal_speed_coefficients(method: str, ndim: int) -> float:
r"""
Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}`
based on the given ``method`` and ``ndim``.
See the `~plasmapy.formulary.parameters.thermal_speed`
    :ref:`Notes <thermal-speed-notes>` section for further details.
Parameters
----------
method : `str`
Method to be used for calculating the thermal speed. Valid values are
``"most_probable"``, ``"rms"``, ``"mean_magnitude"``, and ``"nrl"``.
ndim : `int`
Dimensionality (1D, 2D, 3D) of space in which to calculate thermal
speed. Valid values are ``1``, ``2``, or ``3``.
Raises
------
`ValueError`
If ``method`` or ``ndim`` are not a valid value.
Notes
-----
    For a detailed explanation of the different coefficients used to calculate
    the thermal speed, see the :ref:`Notes <thermal-speed-notes>` section
    for `~plasmapy.formulary.parameters.thermal_speed`. The possible return
    values are listed in the table below.
    .. table:: Thermal speed :math:`v_{th}` coefficients.
       :widths: 2 1 1 1
       :width: 100%
       +----------------------+--------------------+--------------------+--------------------+
       | ↓ **method**         | ``ndim = 1``       | ``ndim = 2``       | ``ndim = 3``       |
       +======================+====================+====================+====================+
       | ``"most_probable"``  | :math:`0`          | :math:`1`          | :math:`\sqrt{2}`   |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"rms"``            | :math:`1`          | :math:`\sqrt{2}`   | :math:`\sqrt{3}`   |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"mean_magnitude"`` | :math:`\sqrt{2/π}` | :math:`\sqrt{π/2}` | :math:`\sqrt{8/π}` |
       +----------------------+--------------------+--------------------+--------------------+
       | ``"nrl"``            | :math:`1`          | :math:`1`          | :math:`1`          |
       +----------------------+--------------------+--------------------+--------------------+
Examples
--------
>>> thermal_speed_coefficients(method="most_probable", ndim=3)
1.414213...
"""
_coefficients = {
(1, "most_probable"): 0,
(2, "most_probable"): 1,
(3, "most_probable"): np.sqrt(2),
(1, "rms"): 1,
(2, "rms"): np.sqrt(2),
(3, "rms"): np.sqrt(3),
(1, "mean_magnitude"): np.sqrt(2 / np.pi),
(2, "mean_magnitude"): np.sqrt(np.pi / 2),
(3, "mean_magnitude"): np.sqrt(8 / np.pi),
(1, "nrl"): 1,
(2, "nrl"): 1,
(3, "nrl"): 1,
}
try:
coeff = _coefficients[(ndim, method)]
except KeyError:
raise ValueError(
f"Value for (ndim, method) pair not valid, got '({ndim}, {method})'."
)
return coeff
|
40,483 |
def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True`, will either
drop or keep both edges of an undirected edge.
(default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
                         f'(got {p})')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = num_nodes[0] or edge_index[0].max().item() + 1
num_dst_nodes = num_nodes[1] or edge_index[1].max().item() + 1
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
|
def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges to :obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True`, will either
drop or keep both edges of an undirected edge.
(default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
                         f'(got {p})')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = num_nodes[0] or edge_index[0].max().item() + 1
num_dst_nodes = num_nodes[1] or edge_index[1].max().item() + 1
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
|
58,483 |
def handler(event, context):
# this lambda expects inputs from an SQS event source mapping
if not event.get("Records"):
raise ValueError("no records passed to event")
# it expects exactly one record where the message body is '{"destination": "<queue_url>"}' that mimics a
# DestinationConfig (which is not possible with SQS event source mappings).
record = event["Records"][0]
message = json.loads(record["body"])
if not message.get("destination"):
raise ValueError("no destination for the event given")
error = None
try:
if message["fail_attempts"] >= int(record["attributes"]["ApproximateReceiveCount"]):
raise ValueError("failed attempt")
except Exception as e:
error = e
raise
finally:
# we then send a message to the destination queue
result = {"error": None if not error else str(error), "event": event}
sqs = create_external_boto_client("sqs")
sqs.send_message(QueueUrl=message.get("destination"), MessageBody=json.dumps(result))
|
def handler(event, context):
# this lambda expects inputs from an SQS event source mapping
if len(event.get("Records", [])) != 1:
raise ValueError("the payload must consist of exactly one record")
# it expects exactly one record where the message body is '{"destination": "<queue_url>"}' that mimics a
# DestinationConfig (which is not possible with SQS event source mappings).
record = event["Records"][0]
message = json.loads(record["body"])
if not message.get("destination"):
raise ValueError("no destination for the event given")
error = None
try:
if message["fail_attempts"] >= int(record["attributes"]["ApproximateReceiveCount"]):
raise ValueError("failed attempt")
except Exception as e:
error = e
raise
finally:
# we then send a message to the destination queue
result = {"error": None if not error else str(error), "event": event}
sqs = create_external_boto_client("sqs")
sqs.send_message(QueueUrl=message.get("destination"), MessageBody=json.dumps(result))
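For context, a sketch of the kind of SQS event both handlers above expect; the queue URL and counters are hypothetical, and only the keys the handler actually reads are shown.
# Hypothetical event shape (only fields the handler reads are included).
example_event = {
    "Records": [{
        "body": '{"destination": "https://sqs.us-east-1.amazonaws.com/000000000000/dest-queue", "fail_attempts": 1}',
        "attributes": {"ApproximateReceiveCount": "1"},
    }]
}
# On the first delivery 1 >= 1, so the handler raises and the message is retried;
# once ApproximateReceiveCount exceeds fail_attempts it succeeds. In both cases the
# finally block reports the outcome to the destination queue.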
|
31,288 |
def main():
demisto.debug(f'Command being called is {demisto.command()}')
try:
command = demisto.command()
params = demisto.params()
args = demisto.args()
client = Client(
app_id=params.get('app_id', ''),
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('The test module is not functional, run the msgraph-apps--auth-start command instead.')
elif command == 'msgraph-apps-auth-start':
return_results(start_auth(client))
elif command == 'msgraph-apps-auth-complete':
return_results(complete_auth(client))
elif command == 'msgraph-apps-auth-test':
return_results(test_connection(client))
elif command == 'msgraph-apps-auth-reset':
return_results(test_connection(client))
elif command == 'msgraph-apps-list-service-principal':
return_results(list_service_principals_command(client, args))
elif command == 'msgraph-apps-remove-service-principal':
return_results(remove_service_principals_command(client, args))
else:
raise NotImplementedError(f"Command '{command}' not found.")
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main():
demisto.debug(f'Command being called is {demisto.command()}')
try:
command = demisto.command()
params = demisto.params()
args = demisto.args()
client = Client(
app_id=params.get('app_id', ''),
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('The test module is not functional, run the msgraph-appsauth-start command instead.')
elif command == 'msgraph-apps-auth-start':
return_results(start_auth(client))
elif command == 'msgraph-apps-auth-complete':
return_results(complete_auth(client))
elif command == 'msgraph-apps-auth-test':
return_results(test_connection(client))
elif command == 'msgraph-apps-auth-reset':
return_results(test_connection(client))
elif command == 'msgraph-apps-list-service-principal':
return_results(list_service_principals_command(client, args))
elif command == 'msgraph-apps-remove-service-principal':
return_results(remove_service_principals_command(client, args))
else:
raise NotImplementedError(f"Command '{command}' not found.")
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
22,696 |
def safe_open(path, mode="w", chmod=None):
"""Safely open a file.
:param str path: Path to a file.
:param str mode: Same as `mode` for `open`.
:param int chmod: Same as `mode` for `security.open`, uses Python defaults
if ``None``.
"""
open_args = () # type: Union[Tuple[()], Tuple[int]]
if chmod is not None:
open_args = (chmod,)
fdopen_args = () # type: Union[Tuple[()], Tuple[int]]
fd = filesystem.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, *open_args)
return os.fdopen(fd, mode, *fdopen_args)
|
def safe_open(path, mode="w", chmod=None):
"""Safely open a file.
:param str path: Path to a file.
:param str mode: Same as `mode` for `open`.
:param int chmod: Same as `mode` for `filesystem.open`, uses Python defaults
if ``None``.
"""
open_args = () # type: Union[Tuple[()], Tuple[int]]
if chmod is not None:
open_args = (chmod,)
fdopen_args = () # type: Union[Tuple[()], Tuple[int]]
fd = filesystem.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, *open_args)
return os.fdopen(fd, mode, *fdopen_args)
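A small usage sketch for safe_open; it assumes the surrounding `filesystem` helper module is importable, and the path and permission bits are illustrative.
# Usage sketch (path and chmod bits are illustrative; requires the `filesystem` module above).
with safe_open("/tmp/example-secret.key", mode="w", chmod=0o600) as f:
    f.write("created atomically; O_EXCL makes this fail if the file already exists\n")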
|
7,329 |
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
overlap=.5, log_scale=False, *, threshold_rel=None):
"""Finds blobs in the given grayscale image.
Blobs are found using the Determinant of Hessian method [1]_. For each blob
found, the method returns its coordinates and the standard deviation
of the Gaussian Kernel used for the Hessian matrix whose determinant
detected the blob. Determinant of Hessians is approximated using [2]_.
Parameters
----------
image : 2D ndarray
Input grayscale image. Blobs can either be light on dark or vice versa.
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this low to detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this high to detect larger blobs.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float or None, optional
The absolute lower bound for scale space maxima. Local maxima smaller
than `threshold` are ignored. Reduce this to detect blobs with lower
intensities. If `threshold_rel` is also specified, whichever threshold
is larger will be used. If None, `threshold_rel` is used instead.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `overlap`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
threshold_rel : float or None, optional
Minimum intensity of peaks, calculated as
``max(dog_space) * threshold_rel``. Where ``dog_space`` refers to the
stack of determinant-of-hessian (DoH) images computed internally. This
should have a value between 0 and 1. If None, `threshold_abs` is used
instead.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel of the Hessian Matrix whose
determinant detected the blob.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian
.. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Examples
--------
>>> from skimage import data, feature
>>> img = data.coins()
>>> feature.blob_doh(img)
array([[197. , 153. , 20.33333333],
[124. , 336. , 20.33333333],
[126. , 153. , 20.33333333],
[195. , 100. , 23.55555556],
[192. , 212. , 23.55555556],
[121. , 271. , 30. ],
[126. , 101. , 20.33333333],
[193. , 275. , 23.55555556],
[123. , 205. , 20.33333333],
[270. , 363. , 30. ],
[265. , 113. , 23.55555556],
[262. , 243. , 23.55555556],
[185. , 348. , 30. ],
[156. , 302. , 30. ],
[123. , 44. , 23.55555556],
[260. , 173. , 30. ],
[197. , 44. , 20.33333333]])
Notes
-----
The radius of each blob is approximately `sigma`.
Computation of Determinant of Hessians is independent of the standard
deviation. Therefore detecting larger blobs won't take more time. In
methods like :py:meth:`blob_dog` and :py:meth:`blob_log`, the computation
of Gaussians for larger `sigma` takes more time. The downside is that
this method can't be used for detecting blobs of radius less than `3px`
due to the box filters used in the approximation of Hessian Determinant.
"""
check_nD(image, 2)
image = img_as_float(image)
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
image = integral_image(image)
if log_scale:
start, stop = math.log(min_sigma, 10), math.log(max_sigma, 10)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
image_cube = np.dstack(hessian_images)
local_maxima = peak_local_max(image_cube,
threshold_abs=threshold,
threshold_rel=threshold_rel,
exclude_border=False,
footprint=np.ones((3,) * image_cube.ndim))
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, -1] = sigma_list[local_maxima[:, -1]]
return _prune_blobs(lm, overlap)
|
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
overlap=.5, log_scale=False, *, threshold_rel=None):
"""Finds blobs in the given grayscale image.
Blobs are found using the Determinant of Hessian method [1]_. For each blob
found, the method returns its coordinates and the standard deviation
of the Gaussian Kernel used for the Hessian matrix whose determinant
detected the blob. Determinant of Hessians is approximated using [2]_.
Parameters
----------
image : 2D ndarray
Input grayscale image. Blobs can either be light on dark or vice versa.
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this low to detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this high to detect larger blobs.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float or None, optional
The absolute lower bound for scale space maxima. Local maxima smaller
than `threshold` are ignored. Reduce this to detect blobs with lower
intensities. If `threshold_rel` is also specified, whichever threshold
is larger will be used. If None, `threshold_rel` is used instead.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `overlap`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
threshold_rel : float or None, optional
Minimum intensity of peaks, calculated as
``max(dog_space) * threshold_rel``, where ``dog_space`` refers to the
stack of determinant-of-hessian (DoH) images computed internally. This
should have a value between 0 and 1. If None, `threshold_abs` is used
instead.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel of the Hessian Matrix whose
determinant detected the blob.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian
.. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Examples
--------
>>> from skimage import data, feature
>>> img = data.coins()
>>> feature.blob_doh(img)
array([[197. , 153. , 20.33333333],
[124. , 336. , 20.33333333],
[126. , 153. , 20.33333333],
[195. , 100. , 23.55555556],
[192. , 212. , 23.55555556],
[121. , 271. , 30. ],
[126. , 101. , 20.33333333],
[193. , 275. , 23.55555556],
[123. , 205. , 20.33333333],
[270. , 363. , 30. ],
[265. , 113. , 23.55555556],
[262. , 243. , 23.55555556],
[185. , 348. , 30. ],
[156. , 302. , 30. ],
[123. , 44. , 23.55555556],
[260. , 173. , 30. ],
[197. , 44. , 20.33333333]])
Notes
-----
The radius of each blob is approximately `sigma`.
Computation of Determinant of Hessians is independent of the standard
deviation. Therefore detecting larger blobs won't take more time. In
methods like :py:meth:`blob_dog` and :py:meth:`blob_log`, the computation
of Gaussians for larger `sigma` takes more time. The downside is that
this method can't be used for detecting blobs of radius less than `3px`
due to the box filters used in the approximation of Hessian Determinant.
"""
check_nD(image, 2)
image = img_as_float(image)
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
image = integral_image(image)
if log_scale:
start, stop = math.log(min_sigma, 10), math.log(max_sigma, 10)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
image_cube = np.dstack(hessian_images)
local_maxima = peak_local_max(image_cube,
threshold_abs=threshold,
threshold_rel=threshold_rel,
exclude_border=False,
footprint=np.ones((3,) * image_cube.ndim))
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, -1] = sigma_list[local_maxima[:, -1]]
return _prune_blobs(lm, overlap)
|
58,071 |
def get_tags_command(client: Client, args=Dict[str, Any]) -> List[CommandResults]:
"""
get_tags commands: Returns paginated list of tags
"""
page = int(args.get("page", 1))
page_size = int(args.get("page_size", 10))
query = args.get("q")
response = client.get_tags(page, page_size, query)
tags_list = response.get("data", {}).get("results", [])
results = []
for tag in tags_list:
results.append(
CommandResults(
readable_output=tableToMarkdown("Tag Data", tag, removeNull=True),
outputs_prefix="CTIX.Tag",
outputs_key_field="name",
outputs=tag,
)
)
return results
|
def get_tags_command(client: Client, args=Dict[str, Any]) -> List[CommandResults]:
"""
get_tags commands: Returns paginated list of tags
"""
page = arg_to_number(args.get("page", 1))
page_size = arg_to_number(args.get("page_size", 10))
query = args.get("q")
response = client.get_tags(page, page_size, query)
tags_list = response.get("data", {}).get("results", [])
results = []
for tag in tags_list:
results.append(
CommandResults(
readable_output=tableToMarkdown("Tag Data", tag, removeNull=True),
outputs_prefix="CTIX.Tag",
outputs_key_field="name",
outputs=tag,
)
)
return results
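The practical difference between int() and arg_to_number in the two versions above is how missing arguments are handled; a short sketch, assuming CommonServerPython's usual helper semantics:
# Sketch of the difference (assumes CommonServerPython's arg_to_number semantics).
int("10")                 # 10
# int(None)               # raises TypeError if the argument were ever missing
arg_to_number("10")       # 10
arg_to_number(None)       # None -- missing args degrade gracefully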
|
44,247 |
def fidelity(qnode0, qnode1, wires0, wires1):
r"""Compute the fidelity for two :class:`.QNode` returning a :func:`~.state` (a state can be a state vector
or a density matrix, depending on the device) acting on quantum systems with the same size.
The fidelity for two mixed states given by density matrices :math:`\rho` and :math:`\sigma`
is defined as
.. math::
F( \rho , \sigma ) = \text{Tr}( \sqrt{\sqrt{\rho} \sigma \sqrt{\rho}})^2
If one of the states is pure, say :math:`\rho=\ket{\psi}\bra{\psi}`, then the expression
for fidelity simplifies to
.. math::
F( \ket{\psi} , \sigma ) = \bra{\psi} \sigma \ket{\psi}
Finally, if both states are pure, :math:`\sigma=\ket{\phi}\bra{\phi}`, then the
fidelity is simply
.. math::
F( \ket{\psi} , \ket{\phi}) = \left|\braket{\psi, \phi}\right|^2
.. note::
The second state is coerced to the type and dtype of the first state. The fidelity is returned in the type
of the interface of the first state.
Args:
state0 (QNode): A :class:`.QNode` returning a :func:`~.state`.
state1 (QNode): A :class:`.QNode` returning a :func:`~.state`.
wires0 (Sequence[int]): the wires of the first subsystem
wires1 (Sequence[int]): the wires of the second subsystem
Returns:
func: A function that returns the fidelity between the states outputted by the QNodes.
**Example**
First, let's consider two QNodes with potentially different signatures: a circuit with two parameters
and another circuit with a single parameter. The output of the `qml.qinfo.fidelity` transform then requires
two tuples to be passed as arguments, each containing the args and kwargs of their respective circuit, e.g.
`all_args0 = (0.1, 0.3)` and `all_args1 = (0.2)` in the following case:
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx(x, y):
qml.RX(x, wires=0)
qml.RZ(y, wires=0)
return qml.state()
@qml.qnode(dev)
def circuit_ry(y):
qml.RY(y, wires=0)
return qml.state()
>>> qml.qinfo.fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((0.1, 0.3), (0.2))
0.9905158135644924
It is also possible to use QNodes that do not depend on any parameters. When it is the case for the first QNode, you
need to pass an empty tuple as an argument for the first QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx():
return qml.state()
@qml.qnode(dev)
def circuit_ry(x):
qml.RY(x, wires=0)
return qml.state()
>>> qml.qinfo.fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((), (0.2))
0.9900332889206207
On the other hand, if the second QNode is the one that does not depend on parameters then a single tuple can also be
passed:
>>> qml.qinfo.fidelity(circuit_ry, circuit_rx, wires0=[0], wires1=[0])((0.2))
0.9900332889206207
The `qml.qinfo.fidelity` transform is also differentiable and you can use the gradient in the different frameworks
with backpropagation, the following example uses `jax` and `backprop`.
.. code-block:: python
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface="jax")
def circuit0(x):
qml.RX(x, wires=0)
return qml.state()
@qml.qnode(dev, interface="jax")
def circuit1():
qml.PauliZ(wires=0)
return qml.state()
>>> jax.grad(qml.qinfo.fidelity(circuit0, circuit1, wires0=[0], wires1=[0]))((jax.numpy.array(0.3)))
-0.14776011
There is also the possibility to pass a single dictionary at the end of the tuple for fixing keyword args,
as shown in the following example:
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx(x, y):
qml.RX(x, wires=0)
qml.RZ(y, wires=0)
return qml.state()
@qml.qnode(dev)
def circuit_ry(y, use_ry):
if use_ry:
qml.RY(y, wires=0)
return qml.state()
>>> fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((0.1, 0.3), (0.9, {'use_ry': True})))
0.8208074192135424
"""
if len(wires0) != len(wires1):
raise qml.QuantumFunctionError("The two states must have the same number of wires.")
# Get the state vector if all wires are selected
if len(wires0) == len(qnode0.device.wires):
state_qnode0 = qnode0
else:
state_qnode0 = qml.qinfo.reduced_dm(qnode0, wires=wires0)
# Get the state vector if all wires are selected
if len(wires1) == len(qnode1.device.wires):
state_qnode1 = qnode1
else:
state_qnode1 = qml.qinfo.reduced_dm(qnode1, wires=wires1)
def evaluate_fidelity(all_args0=None, all_args1=None):
"""Wrapper used for evaluation of the fidelity between two states computed from QNodes. It allows giving
the args and kwargs to each :class:`.QNode`.
Args:
all_args0 (tuple): Tuple containing the arguments (*args, **kwargs) of the first :class:`.QNode`.
all_args1 (tuple): Tuple containing the arguments (*args, **kwargs) of the second :class:`.QNode`.
Returns:
float: Fidelity between two quantum states
"""
if not isinstance(all_args0, tuple) and all_args0 is not None:
all_args0 = (all_args0,)
if not isinstance(all_args1, tuple) and all_args1 is not None:
all_args1 = (all_args1,)
# If no all_args is given, evaluate the QNode without args
if all_args0 is not None:
# Handle a dictionary as last argument
if isinstance(all_args0[-1], dict):
args0 = all_args0[:-1]
kwargs0 = all_args0[-1]
else:
args0 = all_args0
kwargs0 = {}
state0 = state_qnode0(*args0, **kwargs0)
else:
# No args
state0 = state_qnode0()
# If no all_args is given, evaluate the QNode without args
if all_args1 is not None:
# Handle a dictionary as last argument
if isinstance(all_args1[-1], dict):
args1 = all_args1[:-1]
kwargs1 = all_args1[-1]
else:
args1 = all_args1
kwargs1 = {}
state1 = state_qnode1(*args1, **kwargs1)
else:
# No args
state1 = state_qnode1()
# From the two generated states, compute the fidelity.
fid = qml.math.fidelity(state0, state1)
return fid
return evaluate_fidelity
|
def fidelity(qnode0, qnode1, wires0, wires1):
r"""Compute the fidelity for two :class:`.QNode` returning a :func:`~.state` (a state can be a state vector
or a density matrix, depending on the device) acting on quantum systems with the same size.
The fidelity for two mixed states given by density matrices :math:`\rho` and :math:`\sigma`
is defined as
.. math::
F( \rho , \sigma ) = \text{Tr}( \sqrt{\sqrt{\rho} \sigma \sqrt{\rho}})^2
If one of the states is pure, say :math:`\rho=\ket{\psi}\bra{\psi}`, then the expression
for fidelity simplifies to
.. math::
F( \ket{\psi} , \sigma ) = \bra{\psi} \sigma \ket{\psi}
Finally, if both states are pure, :math:`\sigma=\ket{\phi}\bra{\phi}`, then the
fidelity is simply
.. math::
F( \ket{\psi} , \ket{\phi}) = \left|\braket{\psi, \phi}\right|^2
.. note::
The second state is coerced to the type and dtype of the first state. The fidelity is returned in the type
of the interface of the first state.
Args:
state0 (QNode): A :class:`.QNode` returning a :func:`~.state`.
state1 (QNode): A :class:`.QNode` returning a :func:`~.state`.
wires0 (Sequence[int]): the wires of the first subsystem
wires1 (Sequence[int]): the wires of the second subsystem
Returns:
func: A function that returns the fidelity between the states outputted by the QNodes.
**Example**
First, let's consider two QNodes with potentially different signatures: a circuit with two parameters
and another circuit with a single parameter. The output of the `qml.qinfo.fidelity` transform then requires
two tuples to be passed as arguments, each containing the args and kwargs of their respective circuit, e.g.
`all_args0 = (0.1, 0.3)` and `all_args1 = (0.2)` in the following case:
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx(x, y):
qml.RX(x, wires=0)
qml.RZ(y, wires=0)
return qml.state()
@qml.qnode(dev)
def circuit_ry(y):
qml.RY(y, wires=0)
return qml.state()
>>> qml.qinfo.fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((0.1, 0.3), (0.2))
0.9905158135644924
It is also possible to use QNodes that do not depend on any parameters. When it is the case for the first QNode, you
need to pass an empty tuple as an argument for the first QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx():
return qml.state()
@qml.qnode(dev)
def circuit_ry(x):
qml.RY(x, wires=0)
return qml.state()
>>> qml.qinfo.fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((), (0.2))
0.9900332889206207
On the other hand, if the second QNode is the one that does not depend on parameters then a single tuple can also be
passed:
>>> qml.qinfo.fidelity(circuit_ry, circuit_rx, wires0=[0], wires1=[0])((0.2))
0.9900332889206207
The `qml.qinfo.fidelity` transform is also differentiable and you can use the gradient in the different frameworks
with backpropagation, the following example uses `jax` and `backprop`.
.. code-block:: python
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface="jax")
def circuit0(x):
qml.RX(x, wires=0)
return qml.state()
@qml.qnode(dev, interface="jax")
def circuit1():
qml.PauliZ(wires=0)
return qml.state()
>>> jax.grad(qml.qinfo.fidelity(circuit0, circuit1, wires0=[0], wires1=[0]))((jax.numpy.array(0.3)))
-0.14776011
There is also the possibility to pass a single dictionary at the end of the tuple for fixing keyword args,
as shown in the following example:
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev)
def circuit_rx(x, y):
qml.RX(x, wires=0)
qml.RZ(y, wires=0)
return qml.state()
@qml.qnode(dev)
def circuit_ry(y, use_ry):
if use_ry:
qml.RY(y, wires=0)
return qml.state()
>>> fidelity(circuit_rx, circuit_ry, wires0=[0], wires1=[0])((0.1, 0.3), (0.9, {'use_ry': True}))
0.8208074192135424
"""
if len(wires0) != len(wires1):
raise qml.QuantumFunctionError("The two states must have the same number of wires.")
# Get the state vector if all wires are selected
if len(wires0) == len(qnode0.device.wires):
state_qnode0 = qnode0
else:
state_qnode0 = qml.qinfo.reduced_dm(qnode0, wires=wires0)
# Get the state vector if all wires are selected
if len(wires1) == len(qnode1.device.wires):
state_qnode1 = qnode1
else:
state_qnode1 = qml.qinfo.reduced_dm(qnode1, wires=wires1)
def evaluate_fidelity(all_args0=None, all_args1=None):
"""Wrapper used for evaluation of the fidelity between two states computed from QNodes. It allows giving
the args and kwargs to each :class:`.QNode`.
Args:
all_args0 (tuple): Tuple containing the arguments (*args, **kwargs) of the first :class:`.QNode`.
all_args1 (tuple): Tuple containing the arguments (*args, **kwargs) of the second :class:`.QNode`.
Returns:
float: Fidelity between two quantum states
"""
if not isinstance(all_args0, tuple) and all_args0 is not None:
all_args0 = (all_args0,)
if not isinstance(all_args1, tuple) and all_args1 is not None:
all_args1 = (all_args1,)
# If no all_args is given, evaluate the QNode without args
if all_args0 is not None:
# Handle a dictionary as last argument
if isinstance(all_args0[-1], dict):
args0 = all_args0[:-1]
kwargs0 = all_args0[-1]
else:
args0 = all_args0
kwargs0 = {}
state0 = state_qnode0(*args0, **kwargs0)
else:
# No args
state0 = state_qnode0()
# If no all_args is given, evaluate the QNode without args
if all_args1 is not None:
# Handle a dictionary as last argument
if isinstance(all_args1[-1], dict):
args1 = all_args1[:-1]
kwargs1 = all_args1[-1]
else:
args1 = all_args1
kwargs1 = {}
state1 = state_qnode1(*args1, **kwargs1)
else:
# No args
state1 = state_qnode1()
# From the two generated states, compute the fidelity.
fid = qml.math.fidelity(state0, state1)
return fid
return evaluate_fidelity
|
32,893 |
def patch():
if getattr(aioredis, "_datadog_patch", False):
return
setattr(aioredis, "_datadog_patch", True)
_w("aioredis.client", "Redis.execute_command", traced_execute_command)
_w("aioredis.client", "Redis.pipeline", traced_pipeline)
_w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
pin = Pin(app="aioredis")
pin.onto(aioredis.client)
|
def patch():
if getattr(aioredis, "_datadog_patch", False):
return
setattr(aioredis, "_datadog_patch", True)
_w("aioredis.client", "Redis.execute_command", traced_execute_command)
_w("aioredis.client", "Redis.pipeline", traced_pipeline)
_w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
pin = Pin()
pin.onto(aioredis.client)
|
43,696 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
41,997 |
def _create_zmap(
x_values: np.ndarray,
y_values: np.ndarray,
z_values: List[Union[int, float]],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[complex, Union[int, float]]:
# creates z-map from trial values and params.
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = np.argmin(np.abs(xi - x))
yindex = np.argmin(np.abs(yi - y))
zmap[complex(xindex, yindex)] = z # type: ignore
return zmap
|
def _create_zmap(
x_values: np.ndarray,
y_values: np.ndarray,
z_values: List[float],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[complex, Union[int, float]]:
# creates z-map from trial values and params.
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = np.argmin(np.abs(xi - x))
yindex = np.argmin(np.abs(yi - y))
zmap[complex(xindex, yindex)] = z # type: ignore
return zmap
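A quick sketch of how the snapping works, with hypothetical trial values and a 5-point resampled grid; only the function above is assumed to be in scope.
# Hypothetical inputs; indices below are what the argmin-based snapping produces.
import numpy as np
xi = np.linspace(0.0, 1.0, 5)
yi = np.linspace(0.0, 1.0, 5)
zmap = _create_zmap(np.array([0.12, 0.90]), np.array([0.40, 0.76]), [1.5, 2.0], xi, yi)
# zmap == {complex(0, 2): 1.5, complex(4, 3): 2.0}
# i.e. each key packs the (x index, y index) of the nearest grid point into a complex number.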
|
48,997 |
def test_dag_import() -> None:
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
import retries as module
internal_unit_testing.assert_has_valid_dag(module)
|
def test_dag_import() -> None:
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
import retries
internal_unit_testing.assert_has_valid_dag(retries)
|
25,931 |
def _prepare_deployment_properties_unmodified(cmd, template_file=None, template_uri=None, parameters=None,
mode=None, rollback_on_error=None, no_prompt=False, template_spec=None, query_string=None):
cli_ctx = cmd.cli_ctx
DeploymentProperties, TemplateLink, OnErrorDeployment = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
'DeploymentProperties', 'TemplateLink',
'OnErrorDeployment', mod='models')
template_link = None
template_obj = None
on_error_deployment = None
template_content = None
if query_string and not template_uri:
raise IncorrectUsageError('must provide --template-uri if --query-string is specified')
if template_uri:
if query_string:
template_link = TemplateLink(uri=template_uri, query_string=query_string)
template_uri = _prepare_template_uri_with_query_string(template_uri=template_uri, input_query_string=query_string)
else:
template_link = TemplateLink(uri=template_uri)
template_obj = _remove_comments_from_json(_urlretrieve(template_uri).decode('utf-8'), file_path=template_uri)
elif template_spec:
template_link = TemplateLink(id=template_spec, mode="Incremental")
template_obj = show_resource(cmd=cmd, resource_ids=[template_spec]).properties['template']
else:
template_content = read_file_content(template_file)
template_obj = _remove_comments_from_json(template_content, file_path=template_file)
if rollback_on_error == '':
on_error_deployment = OnErrorDeployment(type='LastSuccessful')
elif rollback_on_error:
on_error_deployment = OnErrorDeployment(type='SpecificDeployment', deployment_name=rollback_on_error)
template_param_defs = template_obj.get('parameters', {})
template_obj['resources'] = template_obj.get('resources', [])
parameters = _process_parameters(template_param_defs, parameters) or {}
parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters, no_prompt)
parameters = json.loads(json.dumps(parameters))
properties = DeploymentProperties(template=template_content, template_link=template_link,
parameters=parameters, mode=mode, on_error_deployment=on_error_deployment)
return properties
|
def _prepare_deployment_properties_unmodified(cmd, template_file=None, template_uri=None, parameters=None,
mode=None, rollback_on_error=None, no_prompt=False, template_spec=None, query_string=None):
cli_ctx = cmd.cli_ctx
DeploymentProperties, TemplateLink, OnErrorDeployment = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
'DeploymentProperties', 'TemplateLink',
'OnErrorDeployment', mod='models')
template_link = None
template_obj = None
on_error_deployment = None
template_content = None
if query_string and not template_uri:
raise IncorrectUsageError('please provide --template-uri if --query-string is specified')
if template_uri:
if query_string:
template_link = TemplateLink(uri=template_uri, query_string=query_string)
template_uri = _prepare_template_uri_with_query_string(template_uri=template_uri, input_query_string=query_string)
else:
template_link = TemplateLink(uri=template_uri)
template_obj = _remove_comments_from_json(_urlretrieve(template_uri).decode('utf-8'), file_path=template_uri)
elif template_spec:
template_link = TemplateLink(id=template_spec, mode="Incremental")
template_obj = show_resource(cmd=cmd, resource_ids=[template_spec]).properties['template']
else:
template_content = read_file_content(template_file)
template_obj = _remove_comments_from_json(template_content, file_path=template_file)
if rollback_on_error == '':
on_error_deployment = OnErrorDeployment(type='LastSuccessful')
elif rollback_on_error:
on_error_deployment = OnErrorDeployment(type='SpecificDeployment', deployment_name=rollback_on_error)
template_param_defs = template_obj.get('parameters', {})
template_obj['resources'] = template_obj.get('resources', [])
parameters = _process_parameters(template_param_defs, parameters) or {}
parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters, no_prompt)
parameters = json.loads(json.dumps(parameters))
properties = DeploymentProperties(template=template_content, template_link=template_link,
parameters=parameters, mode=mode, on_error_deployment=on_error_deployment)
return properties
|
17,729 |
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*Cos[Psi]^3 + c4*Cos[Psi]^4 + c5*Cos[Psi]^5
where Psi = t - Pi = t - 180 degrees
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol)
CHARMM_ dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 -c1 - c3 - c4/4 - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
|
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*Cos[Psi]^3 + c4*Cos[Psi]^4 + c5*Cos[Psi]^5
where Psi = t - Pi = t - 180 degrees
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol)
CHARMM_dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 -c1 - c3 - c4/4 - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
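A quick numerical consistency check, not part of the original code: the CHARMM series evaluated at t should reproduce the RB series evaluated at Psi = t - 180 degrees for any coefficients; the coefficient values below are hypothetical.
# Consistency check sketch (hypothetical RB coefficients, kcal/mol).
import numpy as np

def rb(psi, c):
    return sum(c[k] * np.cos(psi) ** k for k in range(6))

def charmm(t, coeffs):
    # coeffs is the (6, 3) matrix of [K, n, d] rows returned by RB_to_CHARMM (d in degrees)
    return sum(K * (1 + np.cos(n * t - np.radians(d))) for K, n, d in np.asarray(coeffs))

c = [1.0, -0.5, 0.25, 0.3, -0.2, 0.1]
t = np.linspace(0.0, 2.0 * np.pi, 7)
assert np.allclose(rb(t - np.pi, c), charmm(t, RB_to_CHARMM(*c)))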
|
37,485 |
def assemble(
experiments: Union[
QuantumCircuit,
List[QuantumCircuit],
Schedule,
List[Schedule],
ScheduleBlock,
Union[ScheduleBlock],
],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None,
memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[float]] = None,
meas_lo_freq: Optional[List[float]] = None,
qubit_lo_range: Optional[List[float]] = None,
meas_lo_range: Optional[List[float]] = None,
schedule_los: Optional[
Union[
List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig],
]
] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: Optional[bool] = True,
use_measure_esp: Optional[bool] = None,
**run_config: Dict,
) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by
``schedule_los`` if specified. Must have length ``n_qubits.``
meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if
specified. Must have length ``n_qubits.``
qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]``
in Hz. Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.``
meas_lo_range: List of job level measurement LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length
``n_qubits.``
schedule_los: Experiment level (ie circuit or schedule) LO frequency configurations for
qubit drive and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
use_measure_esp: Whether to use ESP (excited state promoted) readout for the final
measurement in each circuit. ESP readout can offer higher fidelity than standard
measurement sequences. See `here <https://arxiv.org/pdf/2008.08571.pdf>`_.
Default (set on backend): ``True`` if backend supports ESP readout, else ``False``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(
parameter_binds,
backend,
meas_level,
meas_return,
parametric_pulses,
**run_config_common_dict,
)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(
circuits=experiments, run_config=run_config
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(
circuits=bound_experiments,
qobj_id=qobj_id,
qobj_header=qobj_header,
run_config=run_config,
)
elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(
backend,
meas_level,
meas_return,
meas_map,
memory_slot_size,
rep_time,
parametric_pulses,
**run_config_common_dict,
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(
schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config
)
else:
raise QiskitError(
"bad input to assemble() function; " "must be either circuits or schedules"
)
|
def assemble(
experiments: Union[
QuantumCircuit,
List[QuantumCircuit],
Schedule,
List[Schedule],
ScheduleBlock,
Union[ScheduleBlock],
],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None,
memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[float]] = None,
meas_lo_freq: Optional[List[float]] = None,
qubit_lo_range: Optional[List[float]] = None,
meas_lo_range: Optional[List[float]] = None,
schedule_los: Optional[
Union[
List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig],
]
] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: Optional[bool] = True,
use_measure_esp: Optional[bool] = None,
**run_config: Dict,
) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by
``schedule_los`` if specified. Must have length ``n_qubits.``
meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if
specified. Must have length ``n_qubits.``
qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]``
in Hz. Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.``
meas_lo_range: List of job level measurement LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length
``n_qubits.``
schedule_los: Experiment level (ie circuit or schedule) LO frequency configurations for
qubit drive and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
use_measure_esp: Whether to use ESP (excited state promoted) readout for the final
measurement in each circuit. ESP readout can offer higher fidelity than standard
measurement sequences. See `here <https://arxiv.org/pdf/2008.08571.pdf>`_.
Defaults to ``True`` if the backend supports ESP readout, else ``False``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(
parameter_binds,
backend,
meas_level,
meas_return,
parametric_pulses,
**run_config_common_dict,
)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(
circuits=experiments, run_config=run_config
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(
circuits=bound_experiments,
qobj_id=qobj_id,
qobj_header=qobj_header,
run_config=run_config,
)
elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(
backend,
meas_level,
meas_return,
meas_map,
memory_slot_size,
rep_time,
parametric_pulses,
**run_config_common_dict,
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(
schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config
)
else:
raise QiskitError(
"bad input to assemble() function; " "must be either circuits or schedules"
)
|
10,117 |
def deprecation_warning(module):
deprecated_aliases = ['login_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Aliases \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
def deprecation_warning(module):
deprecated_aliases = ['login_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Alias \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
28,053 |
def write(file_path: str, report_hashes: List[str]):
""" Create a new baseline file or extend an existing one with the given
report hashes in the given output directory. It will remove the duplicates
and also sort the report hashes before writing it to a file.
"""
with open(file_path, mode='a+', encoding='utf-8', errors="ignore") as f:
f.seek(0)
old_report_hashes = __get_report_hashes(f)
new_report_hashes = set(report_hashes) - set(old_report_hashes)
if not new_report_hashes:
LOG.info("Baseline file (%s) is up-to-date.", file_path)
return
if old_report_hashes:
LOG.info("Merging existing baseline file: %s", file_path)
else:
LOG.info("Creating new baseline file: %s", file_path)
LOG.info("Total number of old report hashes: %d",
len(old_report_hashes))
LOG.info("Total number of new report hashes: %d",
len(new_report_hashes))
LOG.debug("New report hashes: %s", sorted(new_report_hashes))
f.seek(0)
f.truncate()
f.write("\n".join(sorted(
set([*old_report_hashes, *report_hashes]))))
|
def write(file_path: str, report_hashes: Iterable[str]):
""" Create a new baseline file or extend an existing one with the given
report hashes in the given output directory. It will remove the duplicates
and also sort the report hashes before writing it to a file.
"""
with open(file_path, mode='a+', encoding='utf-8', errors="ignore") as f:
f.seek(0)
old_report_hashes = __get_report_hashes(f)
new_report_hashes = set(report_hashes) - set(old_report_hashes)
if not new_report_hashes:
LOG.info("Baseline file (%s) is up-to-date.", file_path)
return
if old_report_hashes:
LOG.info("Merging existing baseline file: %s", file_path)
else:
LOG.info("Creating new baseline file: %s", file_path)
LOG.info("Total number of old report hashes: %d",
len(old_report_hashes))
LOG.info("Total number of new report hashes: %d",
len(new_report_hashes))
LOG.debug("New report hashes: %s", sorted(new_report_hashes))
f.seek(0)
f.truncate()
f.write("\n".join(sorted(
set([*old_report_hashes, *report_hashes]))))
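A minimal usage sketch for write; the file path and hashes are illustrative, and the surrounding module (LOG, __get_report_hashes) is assumed to be in scope.
# Usage sketch (illustrative path and hashes).
write("/tmp/reports.baseline", ["deadbeef", "cafebabe", "deadbeef"])
# Duplicates are dropped and the merged hashes are written sorted, one per line.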
|
44,014 |
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if spectrum in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = np.array([f for f in spectrum if f > 0.0])
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored.
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction.
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if spectrum in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = np.array([f for f in spectrum if f > 0.0])
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
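As a plain-numpy sanity check of the reconstruction idea above (a sketch, not part of the library): sampling a toy Fourier series at the same 2R+1 equidistant shifts and solving the [1 | cos | sin] linear system recovers its coefficients.

import numpy as np

def f(x):
    # toy target with spectrum {1, 2}: a0 = 0.3, a = (1.0, 0.0), b = (0.0, -0.5)
    return 0.3 + np.cos(x) - 0.5 * np.sin(2 * x)

spectrum = np.array([1.0, 2.0])
R = len(spectrum)
# same equidistant shifts as in the snippet above
shifts = np.arange(-R, R + 1) * 2 * np.pi / (spectrum.max() * (2 * R + 1)) * R
C = np.hstack([
    np.ones((2 * R + 1, 1)),
    np.cos(np.outer(shifts, spectrum)),
    np.sin(np.outer(shifts, spectrum)),
])
a0, a1, a2, b1, b2 = np.linalg.solve(C, f(shifts))
print(np.round([a0, a1, a2, b1, b2], 6))  # approximately [0.3, 1.0, 0.0, 0.0, -0.5]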
34,628 |
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all parsers for training in chunks.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
train_parser = subparsers.add_parser(
"train-in-chunks",
help="Trains a Rasa model in smaller chunks using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa Core model in smaller chunks using your stories.",
)
train_core_parser.set_defaults(func=train_chunks_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa NLU model in smaller chunks using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_chunks_nlu)
train_arguments.set_train_in_chunks_core_arguments(train_core_parser)
train_arguments.set_train_in_chunks_nlu_arguments(train_nlu_parser)
train_arguments.set_train_in_chunks_arguments(train_parser)
train_parser.set_defaults(func=train_chunks)
|
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Adds all parsers for training in chunks.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
train_parser = subparsers.add_parser(
"train-in-chunks",
help="Trains a Rasa model in smaller chunks using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa Core model in smaller chunks using your stories.",
)
train_core_parser.set_defaults(func=train_chunks_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa NLU model in smaller chunks using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_chunks_nlu)
train_arguments.set_train_in_chunks_core_arguments(train_core_parser)
train_arguments.set_train_in_chunks_nlu_arguments(train_nlu_parser)
train_arguments.set_train_in_chunks_arguments(train_parser)
train_parser.set_defaults(func=train_chunks)
|
43,437 |
def CRotx(theta):
r"""Two-qubit controlled rotation about the x axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1*1j*np.sin(theta/2)], [0, 0, -1*1j*np.sin(theta/2), np.cos(theta/2)]])
|
def CRotx(theta):
r"""Two-qubit controlled rotation about the x axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1j*np.sin(theta/2)], [0, 0, -1j*np.sin(theta/2), np.cos(theta/2)]])
|
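A quick numerical check of the CRotx matrix above (a sketch; it assumes CRotx from the snippet is in scope): the matrix is unitary for any angle and reduces to the 4x4 identity at theta = 0.

import numpy as np

U = CRotx(0.73)                                  # arbitrary angle
assert np.allclose(U @ U.conj().T, np.eye(4))    # unitary: U U† = I
assert np.allclose(CRotx(0.0), np.eye(4))        # theta = 0 gives the identity
print("CRotx checks passed")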
40,681 |
def setup_logger(
name: str = None,
level: int = logging.INFO,
format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
filepath: Optional[str] = None,
distributed_rank: int = None,
) -> logging.Logger:
"""Setups logger: name, level, format etc.
Args:
name (str): new name for the logger. If None, the standard logger is used.
level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG
format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`
filepath (str, optional): Optional logging file path. If not None, logs are written to the file.
distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.
Returns:
logging.Logger
For example, to improve logs readability when training with a trainer and evaluator:
.. code-block:: python
from ignite.utils import setup_logger
trainer = ...
evaluator = ...
trainer.logger = setup_logger("trainer")
evaluator.logger = setup_logger("evaluator")
trainer.run(data, max_epochs=10)
# Logs will look like
# 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
# 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
# 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
# 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
# ...
"""
logger = logging.getLogger(name)
# Remove previous handlers
if logger.hasHandlers():
for h in list(logger.handlers):
logger.removeHandler(h)
formatter = logging.Formatter(format)
if distributed_rank is None:
if dist.is_available() and dist.is_initialized():
distributed_rank = dist.get_rank()
else:
distributed_rank = 0
if distributed_rank > 0:
logger.addHandler(logging.NullHandler())
else:
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
if filepath is not None:
fh = logging.FileHandler(filepath)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
|
def setup_logger(
name: str = None,
level: int = logging.INFO,
format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
filepath: Optional[str] = None,
distributed_rank: Optional[int] = None,
) -> logging.Logger:
"""Setups logger: name, level, format etc.
Args:
name (str): new name for the logger. If None, the standard logger is used.
level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG
format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`
filepath (str, optional): Optional logging file path. If not None, logs are written to the file.
distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.
Returns:
logging.Logger
For example, to improve logs readability when training with a trainer and evaluator:
.. code-block:: python
from ignite.utils import setup_logger
trainer = ...
evaluator = ...
trainer.logger = setup_logger("trainer")
evaluator.logger = setup_logger("evaluator")
trainer.run(data, max_epochs=10)
# Logs will look like
# 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
# 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
# 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
# 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
# ...
"""
logger = logging.getLogger(name)
# Remove previous handlers
if logger.hasHandlers():
for h in list(logger.handlers):
logger.removeHandler(h)
formatter = logging.Formatter(format)
if distributed_rank is None:
if dist.is_available() and dist.is_initialized():
distributed_rank = dist.get_rank()
else:
distributed_rank = 0
if distributed_rank > 0:
logger.addHandler(logging.NullHandler())
else:
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
if filepath is not None:
fh = logging.FileHandler(filepath)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
|
102 |
def find_last_updated():
"""Fetches and returns Standard Ebooks most recent update date.
Returns None if the last modified date is not included in the
response headers.
"""
r = requests.head(FEED_URL)
return r.headers['last-modified'] if r.ok else None
|
def find_last_updated() -> [str | None]:
"""Fetches and returns Standard Ebooks most recent update date.
Returns None if the last modified date is not included in the
response headers.
"""
r = requests.head(FEED_URL)
return r.headers['last-modified'] if r.ok else None
|
57,950 |
def remove_duplicates_in_items(items, id_str: str):
ids = {}
new_items = []
for item in items:
if item.get(id_str) and item.get(id_str) not in ids:
ids[item.get(id_str)] = True
new_items.append(item)
return new_items
|
def remove_duplicates_in_items(items, id_str: str):
ids = {}
new_items = []
for item in items:
if item_id := item.get(id_str) not in ids:
ids[item_id] = True
new_items.append(item)
return new_items
|
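A small usage sketch of the de-duplication helper above (hypothetical items; it assumes remove_duplicates_in_items is in scope): the first occurrence of each id wins, and entries missing the key are dropped.

items = [
    {"id": "1", "name": "first"},
    {"id": "1", "name": "duplicate"},
    {"id": "2", "name": "second"},
    {"name": "no id"},
]
print(remove_duplicates_in_items(items, "id"))
# -> [{'id': '1', 'name': 'first'}, {'id': '2', 'name': 'second'}]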
20,082 |
def _merge_workflows(w1: list('Workflow'),
w2: list('Workflow')) -> list('Workflow'):
workflows = {}
for w in w1 + w2:
workflows[w.name] = w
return list(workflows.values())
|
def _merge_workflows(w1: 'List[Workflow]',
w2: list('Workflow')) -> list('Workflow'):
workflows = {}
for w in w1 + w2:
workflows[w.name] = w
return list(workflows.values())
|
30,150 |
def fetch_price(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if target_datetime:
now = arrow.get(target_datetime, tz='Europe/Paris')
else:
now = arrow.now(tz='Europe/Paris')
r = session or requests.session()
formatted_from = now.shift(days=-1).format('DD/MM/YYYY')
formatted_to = now.format('DD/MM/YYYY')
url = 'http://www.rte-france.com/getEco2MixXml.php?type=donneesMarche&da' \
'teDeb={}&dateFin={}&mode=NORM'.format(formatted_from, formatted_to)
response = r.get(url)
obj = ET.fromstring(response.content)
datas = {}
for donnesMarche in obj:
if donnesMarche.tag != 'donneesMarche':
continue
start_date = arrow.get(arrow.get(donnesMarche.attrib['date']).datetime, 'Europe/Paris')
for item in donnesMarche:
if item.get('granularite') != 'Global':
continue
country_c = item.get('perimetre')
if zone_key != country_c:
continue
value = None
for value in item:
if value.text == 'ND':
continue
period = int(value.attrib['periode'])
datetime = start_date.replace(hour=+period).datetime
if not datetime in datas:
datas[datetime] = {
'zoneKey': zone_key,
'currency': 'EUR',
'datetime': datetime,
'source': 'rte-france.com',
}
data = datas[datetime]
data['price'] = float(value.text)
return list(datas.values())
|
def fetch_price(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if target_datetime:
now = arrow.get(target_datetime, tz='Europe/Paris')
else:
now = arrow.now(tz='Europe/Paris')
r = session or requests.session()
formatted_from = now.shift(days=-1).format('DD/MM/YYYY')
formatted_to = now.format('DD/MM/YYYY')
url = 'http://www.rte-france.com/getEco2MixXml.php?type=donneesMarche&da' \
'teDeb={}&dateFin={}&mode=NORM'.format(formatted_from, formatted_to)
response = r.get(url)
obj = ET.fromstring(response.content)
datas = {}
for donnesMarche in obj:
if donnesMarche.tag != 'donneesMarche':
continue
start_date = arrow.get(arrow.get(donnesMarche.attrib['date']).datetime, 'Europe/Paris')
for item in donnesMarche:
if item.get('granularite') != 'Global':
continue
country_c = item.get('perimetre')
if zone_key != country_c:
continue
value = None
for value in item:
if value.text == 'ND':
continue
period = int(value.attrib['periode'])
datetime = start_date.shift(hours=+period).datetime
if not datetime in datas:
datas[datetime] = {
'zoneKey': zone_key,
'currency': 'EUR',
'datetime': datetime,
'source': 'rte-france.com',
}
data = datas[datetime]
data['price'] = float(value.text)
return list(datas.values())
|
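The key difference between the two fetch_price versions above is arrow's replace() versus shift(): replace(hour=...) sets the hour field and only accepts 0-23, while shift(hours=...) adds a duration. A short illustration with a hypothetical timestamp:

import arrow

start = arrow.get('2021-06-01T00:00:00+02:00')
print(start.replace(hour=5))   # sets the field  -> 2021-06-01T05:00:00+02:00
print(start.shift(hours=5))    # adds 5 hours    -> 2021-06-01T05:00:00+02:00
print(start.shift(hours=26))   # 2021-06-02T02:00:00+02:00; replace(hour=26) would raise ValueError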
34,439 |
def get_evaluation_metrics(
targets: Iterable[Any],
predictions: Iterable[Any],
output_dict: bool = False,
exclude_label: Text = None,
) -> Tuple[Union[Text, Dict[Text, Dict[Text, float]]], float, float, float]:
"""Compute the f1, precision, accuracy and summary report from sklearn.
Args:
targets: target labels
predictions: predicted labels
output_dict: if True sklearn returns a summary report as dict, if False the
report is in string format
exclude_label: labels to exclude from evaluation
Returns:
Report from sklearn, precision, f1, and accuracy values.
"""
from sklearn import metrics
targets = clean_labels(targets)
predictions = clean_labels(predictions)
labels = get_unique_labels(targets, exclude_label)
if not labels:
logger.warning("No labels to evaluate. Skip evaluation.")
return {}, 0.0, 0.0, 0.0
report = metrics.classification_report(
targets, predictions, labels=labels, output_dict=output_dict
)
precision = metrics.precision_score(
targets, predictions, labels=labels, average="weighted"
)
f1 = metrics.f1_score(targets, predictions, labels=labels, average="weighted")
accuracy = metrics.accuracy_score(targets, predictions)
return report, precision, f1, accuracy
|
def get_evaluation_metrics(
targets: Iterable[Any],
predictions: Iterable[Any],
output_dict: bool = False,
exclude_label: Optional[Text] = None,
) -> Tuple[Union[Text, Dict[Text, Dict[Text, float]]], float, float, float]:
"""Compute the f1, precision, accuracy and summary report from sklearn.
Args:
targets: target labels
predictions: predicted labels
output_dict: if True sklearn returns a summary report as dict, if False the
report is in string format
exclude_label: labels to exclude from evaluation
Returns:
Report from sklearn, precision, f1, and accuracy values.
"""
from sklearn import metrics
targets = clean_labels(targets)
predictions = clean_labels(predictions)
labels = get_unique_labels(targets, exclude_label)
if not labels:
logger.warning("No labels to evaluate. Skip evaluation.")
return {}, 0.0, 0.0, 0.0
report = metrics.classification_report(
targets, predictions, labels=labels, output_dict=output_dict
)
precision = metrics.precision_score(
targets, predictions, labels=labels, average="weighted"
)
f1 = metrics.f1_score(targets, predictions, labels=labels, average="weighted")
accuracy = metrics.accuracy_score(targets, predictions)
return report, precision, f1, accuracy
|
17,418 |
def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 1d plot method to class _PlotMethods.
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
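A self-contained sketch of the registration pattern used by _plot1d above (all names here are illustrative, not xarray's actual API): wrap the plotting function, then attach a method with the same name to the accessor class so the plot can be called both ways.

import functools

class _Accessor:
    """Stand-in for the DataArray.plot accessor class."""
    def __init__(self, darray):
        self._da = darray

def register(plotfunc):
    @functools.wraps(plotfunc)
    def newplotfunc(darray, **kwargs):
        return plotfunc(darray, **kwargs)

    @functools.wraps(newplotfunc)
    def plotmethod(self, **kwargs):
        # forward to the module-level wrapper, injecting the stored object
        return newplotfunc(self._da, **kwargs)

    setattr(_Accessor, plotfunc.__name__, plotmethod)
    return newplotfunc

@register
def line(darray, **kwargs):
    return f"line plot of {darray!r}"

print(_Accessor("my-array").line())  # -> line plot of 'my-array'
print(line("my-array"))              # the module-level function still works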
3,907 |
def voterank(G, number_of_nodes=None):
"""Select a list of influential nodes in a graph using VoteRank algorithm
VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
voting scheme. With VoteRank, every node votes for each of its neighbours
and the node with the highest votes is elected iteratively. If a node has been elected,
it doesn’t participate in subsequent voting, and the voting ability of its neighbors is also decreased in subsequent turns.
Parameters
----------
G : graph
A NetworkX graph.
number_of_nodes : integer, optional
Number of ranked nodes to extract (default all nodes).
Returns
-------
voterank : list
Ordered list of computed seeds.
Only nodes with positive number of votes are returned.
Examples
--------
>>> G= nx.Graph()
>>> G.add_edge(0,1)
>>> G.add_edge(0,2)
>>> G.add_edge(0,3)
>>> G.add_edge(1,4)
>>> nx.voterank(G)
[0, 1]
The algorithm can be used both for undirected and directed graphs. However, the directed version is different in two ways:
(i) nodes only vote for their in-neighbors and (ii) only the voting ability of the elected node and its out-neighbors is updated.
You can see an example below:
>>> G= nx.DiGraph()
>>> G.add_edge(0,1)
>>> G.add_edge(2,1)
>>> G.add_edge(2,3)
>>> G.add_edge(3,4)
>>> nx.voterank(G)
[2, 3]
Notes
-----
Each edge is treated independently in case of multigraphs.
References
----------
.. [1] Zhang, J.-X. et al. (2016).
Identifying a set of influential spreaders in complex networks.
Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
"""
influential_nodes = []
voterank = {}
if len(G) == 0:
return influential_nodes
if number_of_nodes is None or number_of_nodes > len(G):
number_of_nodes = len(G)
if G.is_directed():
# For directed graphs compute average out-degree
avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
else:
# For undirected graphs compute average degree
avgDegree = sum(deg for _, deg in G.degree()) / len(G)
# step 1 - initiate all nodes to (0,1) (score, voting ability)
for n in G.nodes():
voterank[n] = [0, 1]
# Repeat steps 1b to 4 until num_seeds are elected.
for _ in range(number_of_nodes):
# step 1b - reset rank
for n in G.nodes():
voterank[n][0] = 0
# step 2 - vote
for n, nbr in G.edges():
# In directed graphs nodes only vote for their in-neighbors
voterank[n][0] += voterank[nbr][1]
if not G.is_directed():
voterank[nbr][0] += voterank[n][1]
for n in influential_nodes:
voterank[n][0] = 0
# step 3 - select top node
n = max(G.nodes, key=lambda x: voterank[x][0])
if voterank[n][0] == 0:
return influential_nodes
influential_nodes.append(n)
# weaken the selected node
voterank[n] = [0, 0]
# step 4 - update voterank properties
for _, nbr in G.edges(n):
voterank[nbr][1] -= 1 / avgDegree
voterank[nbr][1] = max(voterank[nbr][1], 0)
return influential_nodes
|
def voterank(G, number_of_nodes=None):
"""Select a list of influential nodes in a graph using VoteRank algorithm
VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
voting scheme. With VoteRank, every node votes for each of its neighbours
and the node with the highest votes is elected iteratively. If a node has been elected,
it doesn’t participate in subsequent voting, and the voting ability of its neighbors is also decreased in subsequent turns.
Parameters
----------
G : graph
A NetworkX graph.
number_of_nodes : integer, optional
Number of ranked nodes to extract (default all nodes).
Returns
-------
voterank : list
Ordered list of computed seeds.
Only nodes with positive number of votes are returned.
Examples
--------
>>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
>>> nx.voterank(G)
[0, 1]
The algorithm can be used both for undirected and directed graphs. However, the directed version is different in two ways:
(i) nodes only vote for their in-neighbors and (ii) only the voting ability of the elected node and its out-neighbors is updated.
You can see an example below:
>>> G= nx.DiGraph()
>>> G.add_edge(0,1)
>>> G.add_edge(2,1)
>>> G.add_edge(2,3)
>>> G.add_edge(3,4)
>>> nx.voterank(G)
[2, 3]
Notes
-----
Each edge is treated independently in case of multigraphs.
References
----------
.. [1] Zhang, J.-X. et al. (2016).
Identifying a set of influential spreaders in complex networks.
Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
"""
influential_nodes = []
voterank = {}
if len(G) == 0:
return influential_nodes
if number_of_nodes is None or number_of_nodes > len(G):
number_of_nodes = len(G)
if G.is_directed():
# For directed graphs compute average out-degree
avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
else:
# For undirected graphs compute average degree
avgDegree = sum(deg for _, deg in G.degree()) / len(G)
# step 1 - initiate all nodes to (0,1) (score, voting ability)
for n in G.nodes():
voterank[n] = [0, 1]
# Repeat steps 1b to 4 until num_seeds are elected.
for _ in range(number_of_nodes):
# step 1b - reset rank
for n in G.nodes():
voterank[n][0] = 0
# step 2 - vote
for n, nbr in G.edges():
# In directed graphs nodes only vote for their in-neighbors
voterank[n][0] += voterank[nbr][1]
if not G.is_directed():
voterank[nbr][0] += voterank[n][1]
for n in influential_nodes:
voterank[n][0] = 0
# step 3 - select top node
n = max(G.nodes, key=lambda x: voterank[x][0])
if voterank[n][0] == 0:
return influential_nodes
influential_nodes.append(n)
# weaken the selected node
voterank[n] = [0, 0]
# step 4 - update voterank properties
for _, nbr in G.edges(n):
voterank[nbr][1] -= 1 / avgDegree
voterank[nbr][1] = max(voterank[nbr][1], 0)
return influential_nodes
|
36,537 |
def create(*, isolated=True):
""" create() -> Interpreter
Initialize a new (idle) Python interpreter.
"""
id = _interpreters.create()
return Interpreter(id)
|
def create(*, isolated=True):
""" create() -> Interpreter
Initialize a new (idle) Python interpreter.
"""
id = _interpreters.create()
return Interpreter(id, isolated=isolated)
|
51,568 |
def getAllNamesForInstrument(instrumentClass: Instrument,
language: SearchLanguage = SearchLanguage.ALL):
'''
Retrieves all currently stored names for a given instrument.
The instrumentClass should be a valid music21
:class:`~music21.instrument.Instrument`.
By default, this function searches over all supported languages
including instrument name abbreviations (an honorary 'language' for these purposes),
and returns a dict with keys for the language tested and values as a list of
strings for any names in that language.
>>> instrument.getAllNamesForInstrument(instrument.Flute())
{'english': ['flute', 'flutes', 'transverse flute'],
'french': ['flûte', 'flûte traversière', 'flûtes', 'grande flûte'],
'german': ['flöte', 'flöten', 'querflöte'],
'italian': ['flauti', 'flauto', 'flauto traverso'],
'russian': ['fleita'],
'spanish': ['flauta', 'flauta de boehm', 'flauta de concierto',
'flauta traversa', 'flauta travesera', 'flautas'],
'abbreviation': ['fl']}
Alternatively, you can specify the language to search using the `language`
argument.
>>> instrument.getAllNamesForInstrument(instrument.Flute(), language='german')
{'german': ['flöte', 'flöten', 'querflöte']}
An InstrumentException is raised if the specified language is not
one of those currently supported:
'english', 'french', 'german', 'italian', 'russian', 'spanish', and 'abbreviation'.
Note that the language string is not case-sensitive, so 'German' is also fine.
'''
language = language.lower()
instrumentNameDict = {}
instrumentClassName = instrumentClass.instrumentName
if language == 'all':
for lang in [str(x) for x in SearchLanguage][1:]: # skip 'all' here
instrumentNameDict[lang] = _getKeys(instrumentClassName, lang)
elif language not in SearchLanguage:
raise InstrumentException(f'Chosen language {language} not currently supported.')
else: # one, valid language
instrumentNameDict[language] = _getKeys(instrumentClassName, language)
return instrumentNameDict
|
def getAllNamesForInstrument(instrumentClass: Instrument,
language: SearchLanguage = SearchLanguage.ALL):
'''
Retrieves all currently stored names for a given instrument.
The instrumentClass should be a valid music21
:class:`~music21.instrument.Instrument`.
By default, this function searches over all supported languages
including instrument name abbreviations (an honorary 'language' for these purposes),
and returns a dict with keys for the language tested and values as a list of
strings for any names in that language.
>>> instrument.getAllNamesForInstrument(instrument.Flute())
{'english': ['flute', 'flutes', 'transverse flute'],
'french': ['flûte', 'flûte traversière', 'flûtes', 'grande flûte'],
'german': ['flöte', 'flöten', 'querflöte'],
'italian': ['flauti', 'flauto', 'flauto traverso'],
'russian': ['fleita'],
'spanish': ['flauta', 'flauta de boehm', 'flauta de concierto',
'flauta traversa', 'flauta travesera', 'flautas'],
'abbreviation': ['fl']}
Alternatively, you can specify the language to search using the `language`
argument.
>>> instrument.getAllNamesForInstrument(instrument.Flute(), language='german')
{'german': ['flöte', 'flöten', 'querflöte']}
An InstrumentException is raised if the specified language is not
one of those currently supported:
'english', 'french', 'german', 'italian', 'russian', 'spanish', and 'abbreviation'.
Note that the language string is not case-sensitive, so 'German' is also fine.
'''
language = language.lower()
instrumentNameDict = {}
instrumentClassName = instrumentClass.instrumentName
if language == SearchLanguage.ALL:
for lang in SearchLanguage:
if lang is SearchLanguage.ALL:
continue
instrumentNameDict[lang] = _getKeys(instrumentClassName, lang)
elif language not in SearchLanguage:
raise InstrumentException(f'Chosen language {language} not currently supported.')
else: # one, valid language
instrumentNameDict[language] = _getKeys(instrumentClassName, language)
return instrumentNameDict
|
57,820 |
def create_ip_list_dicts(res_json: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Creates separate dictionaries of addresses and ranges
Args:
res_json: Dictionary received from ip list command with 'Address' or 'Range' keys
Returns:
List with address dictionary and ranges of addresses dictionary
Raises:
DemistoException: dictionary doesn't have any of the expected keys
TypeError: res_json is not a dictionary
"""
output_list = []
address_dict = create_ip_list_dict(res_json, 'Address')
range_dict = create_ip_list_dict(res_json, 'Range')
if len(address_dict) != 0:
output_list.append(address_dict)
if len(range_dict) != 0:
output_list.append(range_dict)
if len(output_list) == 0:
raise DemistoException("IP list command is missing keys")
return output_list
|
def create_ip_list_dicts(res_json: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Creates separate dictionaries of addresses and ranges
Args:
res_json: Dictionary received from ip list command with 'Address' or 'Range' keys
Returns:
List with address dictionary and ranges of addresses dictionary
Raises:
DemistoException: dictionary doesn't have any of the expected keys
TypeError: res_json is not a dictionary
"""
output_list = []
address_dict = create_ip_list_dict(res_json, 'Address')
range_dict = create_ip_list_dict(res_json, 'Range')
if address_dict:
output_list.append(address_dict)
if range_dict:
output_list.append(range_dict)
if not output_list:
raise DemistoException("IP list command is missing keys")
return output_list
|
41,039 |
def _checkOrder(n_in):
"""
Checks the order passed to the window functions.
Parameters
----------
n_in :
Returns
-------
n_out :
w :
trivalwin :
"""
w = []
trivalwin = 0
# Special case of negative orders:
if n_in < 0:
raise ValueError('Order cannot be less than zero.')
# Check if order is already an integer or empty
# If not, round to nearest integer.
if not n_in or n_in == np.floor(n_in):
n_out = n_in
else:
n_out = np.round(n_in)
LGR.warning('Rounding order to nearest integer.')
# Special cases:
if not n_out or n_out == 0:
w = np.zeros((0, 1)) # Empty matrix: 0-by-1
trivalwin = 1
elif n_out == 1:
w = 1
trivalwin = 1
return n_out, w, trivalwin
|
def _checkOrder(n_in):
"""
Checks the order passed to the window functions.
Parameters
----------
n_in :
Returns
-------
n_out :
w :
trivalwin :
"""
w = []
trivalwin = 0
# Special case of negative orders:
if n_in < 0:
raise ValueError('Order cannot be less than zero.')
# Check if order is already an integer or empty
# If not, round to nearest integer.
if n_in == np.floor(n_in):
n_out = n_in
else:
n_out = np.round(n_in)
LGR.warning('Rounding order to nearest integer.')
# Special cases:
if not n_out or n_out == 0:
w = np.zeros((0, 1)) # Empty matrix: 0-by-1
trivalwin = 1
elif n_out == 1:
w = 1
trivalwin = 1
return n_out, w, trivalwin
|
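A brief usage sketch of the rounding behaviour described above (it assumes the module defining _checkOrder and its LGR logger is importable): non-integer orders are rounded to the nearest integer and a warning is logged.

n_out, w, trivalwin = _checkOrder(4.6)   # logs 'Rounding order to nearest integer.'
print(n_out, trivalwin)                  # 5.0 0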
16,941 |
def resolve_db_url(hass: HomeAssistant, db_url: str) -> str:
"""Return the db_url provided if not empty, otherwise return the recorder db_url."""
if bool(db_url and not db_url.isspace()):
return db_url
return get_instance(hass).db_url
|
def resolve_db_url(hass: HomeAssistant, db_url: str | None) -> str:
"""Return the db_url provided if not empty, otherwise return the recorder db_url."""
if bool(db_url and not db_url.isspace()):
return db_url
return get_instance(hass).db_url
|
6,675 |
def replace_bom(boms: Dict) -> None:
"Replace current BOM with new BOM in parent BOMs."
current_bom = boms.get("current_bom")
new_bom = boms.get("new_bom")
unit_cost = get_bom_unit_cost(new_bom)
update_new_bom_in_bom_items(unit_cost, current_bom, new_bom)
frappe.cache().delete_key("bom_children")
parent_boms = get_ancestor_boms(new_bom)
for bom in parent_boms:
bom_obj = frappe.get_doc("BOM", bom)
# this is only used for versioning and we do not want
# to make separate db calls by using load_doc_before_save
# which proves to be expensive while doing bulk replace
bom_obj._doc_before_save = bom_obj
bom_obj.update_exploded_items()
bom_obj.calculate_cost()
bom_obj.update_parent_cost()
bom_obj.db_update()
if bom_obj.meta.get("track_changes") and not bom_obj.flags.ignore_version:
bom_obj.save_version()
|
def replace_bom(boms: Dict) -> None:
"Replace current BOM with new BOM in parent BOMs."
current_bom = boms.get("current_bom")
new_bom = boms.get("new_bom")
unit_cost = get_bom_unit_cost(new_bom)
update_new_bom_in_bom_items(unit_cost, current_bom, new_bom)
frappe.cache().delete_key("bom_children")
parent_boms = get_ancestor_boms(new_bom)
for bom in parent_boms:
bom_obj = frappe.get_doc("BOM", bom)
# this is only used for versioning and we do not want
# to make separate db calls by using load_doc_before_save
# which proves to be expensive while doing bulk replace
bom_obj._doc_before_save = bom_obj
bom_obj.update_exploded_items()
bom_obj.calculate_cost()
bom_obj.update_parent_cost()
bom_obj.db_update()
bom_obj.save_version()
|
42,420 |
def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> in_image.shape
(3, 373, 485)
>>> out_image.shape
(3, 265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
|
def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> in_image.shape
(3, 373, 485)
>>> out_image.shape
(265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
|
45,907 |
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as two 1d-convolutions
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = torch.unsqueeze(get_gaussian_kernel2d(kernel_size, sigma), dim=0)
out = kornia.filter2d(input, kernel, border_type)
return out
|
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as two 1d-convolutions
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = torch.unsqueeze(get_gaussian_kernel2d(kernel_size, sigma), dim=0)
out = kornia.filters.filter2d(input, kernel[None], border_type)
return out
|
34,541 |
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text):
if not is_rasa_x_installed():
cli_utils.print_error_and_exit(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X. "
"Instructions on how to install Rasa X can be found here: "
"https://rasa.com/docs/rasa-x/."
)
if args.port == args.rasa_x_port:
cli_utils.print_error_and_exit(
"The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
"same. We need two different ports, one to run Rasa X (e.g. delivering the "
"UI) and another one to run a normal Rasa server.\nPlease specify two "
"different ports using the arguments '--port' and '--rasa-x-port'.".format(
args.rasa_x_port, args.port
)
)
if not is_rasa_project_setup(args, project_path):
cli_utils.print_error_and_exit(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory (see http://rasa.com/docs/rasa/installation#quick-installation)."
)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
cli_utils.print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
|
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text):
if not is_rasa_x_installed():
cli_utils.print_error_and_exit(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X. "
"Instructions on how to install Rasa X can be found here: "
"https://rasa.com/docs/rasa-x/."
)
if args.port == args.rasa_x_port:
cli_utils.print_error_and_exit(
"The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
"same. We need two different ports, one to run Rasa X (e.g. delivering the "
"UI) and another one to run a normal Rasa server.\nPlease specify two "
"different ports using the arguments '--port' and '--rasa-x-port'.".format(
args.rasa_x_port, args.port
)
)
if not is_rasa_project_setup(args, project_path):
cli_utils.print_error_and_exit(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory (see https://rasa.com/docs/rasa/command-line-interface#rasa-init)."
)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
cli_utils.print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
|
49,341 |
def _convert_datetime_to_utc_int(expires_on):
epoch = time.localtime().tm_gmtoff
return time.mktime(expires_on.timetuple()) + epoch
|
def _convert_datetime_to_utc_int(expires_on):
offset_from_utc = time.localtime().tm_gmtoff
return time.mktime(expires_on.timetuple()) + offset_from_utc
|
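Why adding tm_gmtoff works in the helper above (a hedged sanity check, ignoring DST edge cases): time.mktime() interprets the tuple as local time, so for a naive datetime that actually represents UTC the result is off by exactly the local UTC offset.

import calendar
import time
from datetime import datetime

expires_on = datetime(2024, 1, 1, 12, 0, 0)                 # naive, meant as UTC
exact = calendar.timegm(expires_on.timetuple())             # true UTC epoch
adjusted = time.mktime(expires_on.timetuple()) + time.localtime().tm_gmtoff
print(int(exact) == int(adjusted))                          # True, barring DST differences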
21,756 |
def _get_in_flight_counts() -> Dict[Tuple[str, ...], float]:
"""Returns a count of all in flight requests by (method, server_name)"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _in_flight_requests_lock:
reqs = list(_in_flight_requests)
for rm in reqs:
rm.update_metrics()
# Map from (method, name) -> int, the number of in flight requests of that
# type. The key type is Tuple[str, str], but we leave the length unspecified
# for compatibility with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], float] = {}
for rm in reqs:
key = (rm.method, rm.name)
counts[key] = counts.get(key, 0.0) + 1.0
return counts
|
def _get_in_flight_counts() -> Dict[Tuple[str, str], float]:
"""Returns a count of all in flight requests by (method, server_name)"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _in_flight_requests_lock:
reqs = list(_in_flight_requests)
for rm in reqs:
rm.update_metrics()
# Map from (method, name) -> int, the number of in flight requests of that
# type. The key type is Tuple[str, str], but we leave the length unspecified
# for compatibility with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], float] = {}
for rm in reqs:
key = (rm.method, rm.name)
counts[key] = counts.get(key, 0.0) + 1.0
return counts
|
32,830 |
def test_activate_distributed_tracing_context(int_config):
pin = Pin()
int_config.myint["distributed_tracing_enabled"] = True
headers = {
HTTP_HEADER_PARENT_ID: "12345",
HTTP_HEADER_TRACE_ID: "678910",
}
trace_utils.activate_distributed_tracing_context(pin, int_config.myint, headers)
context = pin.tracer.context_provider.active()
assert context._parent_trace_id == 678910
assert context._parent_span_id == 12345
|
def test_activate_distributed_tracing_context(int_config):
pin = Pin()
int_config.myint["distributed_tracing_enabled"] = True
headers = {
HTTP_HEADER_PARENT_ID: "12345",
HTTP_HEADER_TRACE_ID: "678910",
}
trace_utils.activate_distributed_tracing_context(pin, int_config.myint, headers)
context = pin.tracer.context_provider.active()
assert context.trace_id == 678910
assert context._parent_span_id == 12345
|
22,767 |
def generate_csr(privkey: util.Key, names: Set[str], path: str,
must_staple: bool = False, strict_permissions: bool = True) -> util.CSR:
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:param boolean must_staple: If true, include the TLS Feature extension "OCSP Must Staple"
:param boolean strict_permissions: If true, the CSR file will be saved with strict
permissions (POSIX mode 0600).
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
csr_pem = acme_crypto_util.make_csr(
privkey.pem, names, must_staple=must_staple)
# Save CSR
util.make_or_verify_dir(path, 0o755, strict_permissions)
csr_f, csr_filename = util.unique_file(
os.path.join(path, "csr-certbot.pem"), 0o644, "wb")
with csr_f:
csr_f.write(csr_pem)
logger.debug("Creating CSR: %s", csr_filename)
return util.CSR(csr_filename, csr_pem, "pem")
|
def generate_csr(privkey: util.Key, names: Set[str], path: str,
must_staple: bool = False, strict_permissions: bool = True) -> util.CSR:
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:param bool must_staple: If true, include the TLS Feature extension "OCSP Must Staple"
:param boolean strict_permissions: If true, the CSR file will be saved with strict
permissions (POSIX mode 0600).
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
csr_pem = acme_crypto_util.make_csr(
privkey.pem, names, must_staple=must_staple)
# Save CSR
util.make_or_verify_dir(path, 0o755, strict_permissions)
csr_f, csr_filename = util.unique_file(
os.path.join(path, "csr-certbot.pem"), 0o644, "wb")
with csr_f:
csr_f.write(csr_pem)
logger.debug("Creating CSR: %s", csr_filename)
return util.CSR(csr_filename, csr_pem, "pem")
|
24,859 |
def my_func(self, doc_type):
"""This is a docstring.
:param doc_type: Sphinx
:type doc_type: str
"""
return
|
def my_func(self, doc_type):
"""ignores_sphinx_return_none
:param doc_type: Sphinx
:type doc_type: str
"""
return
|
42,702 |
def deserialize_ethereum_transaction(
data: Dict[str, Any],
internal: bool,
ethereum: Optional['EthereumManager'] = None,
trace_id: Optional[int] = None,
) -> Union[EthereumTransaction, EthereumInternalTransaction]:
"""Reads dict data of a transaction and deserializes it.
If the transaction is not from etherscan then it's missing some data
so ethereum manager is used to fetch it.
Can raise DeserializationError if something is wrong
"""
source = 'etherscan' if ethereum is None else 'web3'
try:
block_number = read_integer(data, 'blockNumber', source)
if block_number == 0:
tx_hash = deserialize_evm_tx_hash('0x0000000000000000000000000000000000000000')
from_address = deserialize_ethereum_address('0x0000000000000000000000000000000000000000') # noqa: E501
internal = True
else:
tx_hash = deserialize_evm_tx_hash(data['hash'])
from_address = deserialize_ethereum_address(data['from'])
if 'timeStamp' not in data:
if ethereum is None:
raise DeserializationError('Got in deserialize ethereum transaction without timestamp and without ethereum manager') # noqa: E501
block_data = ethereum.get_block_by_number(block_number)
timestamp = Timestamp(read_integer(block_data, 'timestamp', source))
else:
timestamp = deserialize_timestamp(data['timeStamp'])
is_empty_to_address = data['to'] != '' and data['to'] is not None
to_address = deserialize_ethereum_address(data['to']) if is_empty_to_address else None
value = read_integer(data, 'value', source)
if internal:
trace_id = trace_id or int(data.get('traceId', '0'))
return EthereumInternalTransaction(
parent_tx_hash=tx_hash,
trace_id=trace_id,
timestamp=timestamp,
block_number=block_number,
from_address=from_address,
to_address=to_address,
value=value,
)
# else normal transaction
gas_price = read_integer(data=data, key='gasPrice', api=source)
input_data = read_hash(data, 'input', source)
if 'gasUsed' not in data:
if ethereum is None:
raise DeserializationError('Got in deserialize ethereum transaction without gasUsed and without ethereum manager') # noqa: E501
tx_hash = deserialize_evm_tx_hash(data['hash'])
receipt_data = ethereum.get_transaction_receipt(tx_hash)
gas_used = read_integer(receipt_data, 'gasUsed', source)
else:
gas_used = read_integer(data, 'gasUsed', source)
nonce = read_integer(data, 'nonce', source)
return EthereumTransaction(
timestamp=timestamp,
block_number=block_number,
tx_hash=tx_hash,
from_address=from_address,
to_address=to_address,
value=value,
gas=read_integer(data, 'gas', source),
gas_price=gas_price,
gas_used=gas_used,
input_data=input_data,
nonce=nonce,
)
except KeyError as e:
raise DeserializationError(
f'ethereum {"internal" if internal else ""}transaction from {source} missing expected key {str(e)}', # noqa: E501
) from e
|
def deserialize_ethereum_transaction(
data: Dict[str, Any],
internal: bool,
ethereum: Optional['EthereumManager'] = None,
trace_id: Optional[int] = None,
) -> Union[EthereumTransaction, EthereumInternalTransaction]:
"""Reads dict data of a transaction and deserializes it.
If the transaction is not from etherscan then it's missing some data
so ethereum manager is used to fetch it.
Can raise DeserializationError if something is wrong
"""
source = 'etherscan' if ethereum is None else 'web3'
try:
block_number = read_integer(data, 'blockNumber', source)
if block_number == 0:
tx_hash = deserialize_evm_tx_hash('0x0000000000000000000000000000000000000000')
from_address = deserialize_ethereum_address('0x0000000000000000000000000000000000000000') # noqa: E501
internal = True
else:
tx_hash = deserialize_evm_tx_hash(data['hash'])
from_address = deserialize_ethereum_address(data['from'])
if 'timeStamp' not in data:
if ethereum is None:
raise DeserializationError('Got in deserialize ethereum transaction without timestamp and without ethereum manager') # noqa: E501
block_data = ethereum.get_block_by_number(block_number)
timestamp = Timestamp(read_integer(block_data, 'timestamp', source))
else:
timestamp = deserialize_timestamp(data['timeStamp'])
is_empty_to_address = data['to'] != '' and data['to'] is not None
to_address = deserialize_ethereum_address(data['to']) if is_empty_to_address else None
value = read_integer(data, 'value', source)
if internal:
trace_id = trace_id is not None or int(data.get('traceId', '0'))
return EthereumInternalTransaction(
parent_tx_hash=tx_hash,
trace_id=trace_id,
timestamp=timestamp,
block_number=block_number,
from_address=from_address,
to_address=to_address,
value=value,
)
# else normal transaction
gas_price = read_integer(data=data, key='gasPrice', api=source)
input_data = read_hash(data, 'input', source)
if 'gasUsed' not in data:
if ethereum is None:
raise DeserializationError('Got in deserialize ethereum transaction without gasUsed and without ethereum manager') # noqa: E501
tx_hash = deserialize_evm_tx_hash(data['hash'])
receipt_data = ethereum.get_transaction_receipt(tx_hash)
gas_used = read_integer(receipt_data, 'gasUsed', source)
else:
gas_used = read_integer(data, 'gasUsed', source)
nonce = read_integer(data, 'nonce', source)
return EthereumTransaction(
timestamp=timestamp,
block_number=block_number,
tx_hash=tx_hash,
from_address=from_address,
to_address=to_address,
value=value,
gas=read_integer(data, 'gas', source),
gas_price=gas_price,
gas_used=gas_used,
input_data=input_data,
nonce=nonce,
)
except KeyError as e:
raise DeserializationError(
f'ethereum {"internal" if internal else ""}transaction from {source} missing expected key {str(e)}', # noqa: E501
) from e
|
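The only line that differs between the two versions above is the trace_id assignment for internal transactions. A short standalone snippet, with made-up values, illustrating why the two expressions are not equivalent:

data = {'traceId': '7'}
trace_id = 5  # caller-supplied trace id

# `trace_id or ...` keeps the supplied id and only falls back when it is falsy:
print(trace_id or int(data.get('traceId', '0')))               # -> 5

# `trace_id is not None or ...` evaluates the comparison first, so the whole
# expression collapses to the boolean True and the actual id is lost:
print(trace_id is not None or int(data.get('traceId', '0')))   # -> True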
11,553 |
def generate_code(_=None):
"""Simulate the way oauthlib generates authz codes."""
chars = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "012a456789"
rand = random.SystemRandom()
return "".join(rand.choice(chars) for x in range(30))
|
def generate_code(_=None):
"""Simulate the way oauthlib generates authz codes."""
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012a456789"
rand = random.SystemRandom()
return "".join(rand.choice(chars) for x in range(30))
|
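As a side note, roughly the same behaviour can be sketched with the standard-library secrets module; the charset simply mirrors the one above, including its unusual "012a456789" run, and this is an illustrative alternative rather than the project's code:

import secrets
import string

def generate_code_secrets(length=30):
    """Return a random authz-code-like string, mirroring the charset above."""
    chars = string.ascii_lowercase + string.ascii_uppercase + "012a456789"
    return "".join(secrets.choice(chars) for _ in range(length))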