id (int64, 11 to 59.9k) | original (stringlengths 33 to 150k) | modified (stringlengths 37 to 150k)
---|---|---|
292 |
def point_list_to_multitrace(point_list: List[Dict[str, np.ndarray]], model: Optional[Model]) -> MultiTrace:
'''transform point list into MultiTrace'''
_model = modelcontext(model)
varnames = list(point_list[0].keys())
with _model:
chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])
chain.setup(draws=len(point_list), chain=0)
# since we are simply loading a trace by hand, we need only a vacuous function for
# chain.record() to use. This crushes the default.
def point_fun(point):
return [point[vn] for vn in varnames]
chain.fn = point_fun
for point in point_list:
chain.record(point)
return MultiTrace([chain])
|
def point_list_to_multitrace(point_list: List[Dict[str, np.ndarray]], model: Optional[Model]=None) -> MultiTrace:
'''transform point list into MultiTrace'''
_model = modelcontext(model)
varnames = list(point_list[0].keys())
with _model:
chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])
chain.setup(draws=len(point_list), chain=0)
# since we are simply loading a trace by hand, we need only a vacuous function for
# chain.record() to use. This crushes the default.
def point_fun(point):
return [point[vn] for vn in varnames]
chain.fn = point_fun
for point in point_list:
chain.record(point)
return MultiTrace([chain])
|
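A minimal usage sketch for the modified signature above, where `model` now defaults to `None` and is resolved via `modelcontext` from the active model context. This assumes a PyMC3-style environment and that `point_list_to_multitrace` as defined above is in scope; the variable names and values are illustrative.

```python
import numpy as np
import pymc3 as pm

# Hand-built "draws" expressed as point dictionaries keyed by variable name.
points = [{"mu": np.array(0.1)}, {"mu": np.array(-0.2)}]

with pm.Model():
    pm.Normal("mu", mu=0.0, sigma=1.0)
    # With model=None as the default, the model is taken from the enclosing
    # context manager instead of having to be passed explicitly.
    trace = point_list_to_multitrace(points)  # assumes the helper above is in scope

print(trace["mu"])
```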
49,644 |
def log_status_message(item: str, summary: str, color: str, progress: int,
length: int, verbosity: int = 0) -> None:
s = '{}[{:3d}%] {}'.format(summary, (100 * progress) // length,
colorize(color, item))
if verbosity:
s += '\n'
else:
s = term_width_line(s)
logger.info(s, nonl=True)
|
def log_status_message(item: str, summary: str, color: str, progress: int,
length: int, verbosity: int = 0) -> None:
s = '{}[{:3d}%] {}'.format(summary, (100 * progress) // length,
colorize(color, item))
if verbosity:
logger.info(s)
else:
logger.info(term_width_line(s), nonl=True)
|
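The change above only affects how the formatted status line is emitted: in verbose mode each message gets its own line, otherwise `term_width_line` pads it so successive non-newline log calls overwrite one another. A standalone sketch of that observable behaviour using plain `print`, not Sphinx's logging machinery:

```python
import sys
import time

def status(item, progress, length, verbose):
    s = "[{:3d}%] {}".format((100 * progress) // length, item)
    if verbose:
        print(s)                      # one line per processed item
    else:
        sys.stdout.write("\r" + s)    # keep rewriting the same line
        sys.stdout.flush()

for i, name in enumerate(["index.rst", "api.rst", "changelog.rst"], 1):
    status(name, i, 3, verbose=False)
    time.sleep(0.2)
print()
```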
7,603 |
def aggregate_downsample(time_series, *, time_bin_size=None, time_bin_start=None,
time_bin_end=None, n_bins=None, aggregate_func=None):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
duration) or as an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
are provided and are scalar values, this determines the total bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if time_bin_size is not None and not isinstance(time_bin_size, (u.Quantity, TimeDelta)):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
# Use the table sorted by time
ts_sorted = time_series.iloc[:]
# If start time is not provided, it is assumed to be the start of the timeseries
if time_bin_start is None:
time_bin_start = ts_sorted.time[0]
# Total duration of the timeseries is needed for determining either
# `time_bin_size` or `nbins` in the case of scalar `time_bin_start`
if time_bin_start.isscalar:
time_duration = (ts_sorted.time[-1] - time_bin_start).sec
if time_bin_size is None and time_bin_end is None:
if time_bin_start.isscalar:
if n_bins is None:
raise TypeError("With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided")
else:
# `nbins` defaults to the number needed to fit all points
time_bin_size = time_duration / n_bins * u.s
else:
time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])
if time_bin_start.isscalar:
if time_bin_size is not None:
if time_bin_size.isscalar:
# Determine the number of bins
if n_bins is None:
bin_size_sec = time_bin_size.to_value(u.s)
n_bins = int(np.ceil(time_duration/bin_size_sec))
elif time_bin_end is not None:
if not time_bin_end.isscalar:
# Convert start time to an array and populate using `time_bin_end`
scalar_start_time = time_bin_start
time_bin_start = time_bin_end.replicate(copy=True)
time_bin_start[0] = scalar_start_time
time_bin_start[1:] = time_bin_end[:-1]
# Check for overlapping bins, and warn if they are present
if time_bin_end is not None:
if (not time_bin_end.isscalar and not time_bin_start.isscalar and
np.any(time_bin_start[1:] < time_bin_end[:-1])):
warnings.warn("Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning.",
AstropyUserWarning)
binned = BinnedTimeSeries(time_bin_size=time_bin_size,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
n_bins=n_bins)
if aggregate_func is None:
aggregate_func = np.nanmean
# Start and end times of the binned timeseries
bin_start = binned.time_bin_start
bin_end = binned.time_bin_end
# Set `n_bins` to match the length of `time_bin_start` if
# `n_bins` is unspecified or if `time_bin_start` is an iterable
if n_bins is None or not time_bin_start.isscalar:
n_bins = len(bin_start)
# Find the subset of the table that is inside the union of all bins
keep = ((ts_sorted.time >= bin_start[0]) & (ts_sorted.time <= bin_end[-1]))
# Find out indices to be removed because of uncontiguous bins
for ind in range(n_bins-1):
delete_indices = np.where(np.logical_and(ts_sorted.time > bin_end[ind],
ts_sorted.time < bin_start[ind+1]))
keep[delete_indices] = False
subset = ts_sorted[keep]
# Figure out which bin each row falls in by sorting with respect
# to the bin end times
indices = np.searchsorted(bin_end, ts_sorted.time[keep])
# For time == bin_start[i+1] == bin_end[i], let bin_start take precedence
if len(indices) and np.all(bin_start[1:] >= bin_end[:-1]):
indices_start = np.searchsorted(ts_sorted.time[keep],
np.minimum(bin_start, ts_sorted.time[-1]))
indices[indices_start] = np.arange(len(indices_start))
# Determine rows where values are defined
if len(indices):
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
else:
groups = np.array([])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
for colname in subset.colnames:
if colname == 'time':
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn("Skipping column {0} since it has a mix-in type", AstropyUserWarning)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(reduceat(values.value, groups, aggregate_func),
values.unit, copy=False)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
|
def aggregate_downsample(time_series, *, time_bin_size=None, time_bin_start=None,
time_bin_end=None, n_bins=None, aggregate_func=None):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
duration) or as an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
are provided and are scalar values, this determines the total bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if time_bin_size is not None and not isinstance(time_bin_size, (u.Quantity, TimeDelta)):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
# Use the table sorted by time
ts_sorted = time_series.iloc[:]
# If start time is not provided, it is assumed to be the start of the timeseries
if time_bin_start is None:
time_bin_start = ts_sorted.time[0]
# Total duration of the timeseries is needed for determining either
# `time_bin_size` or `nbins` in the case of scalar `time_bin_start`
if time_bin_start.isscalar:
time_duration = (ts_sorted.time[-1] - time_bin_start).sec
if time_bin_size is None and time_bin_end is None:
if time_bin_start.isscalar:
if n_bins is None:
raise TypeError("With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided")
else:
# `nbins` defaults to the number needed to fit all points
time_bin_size = time_duration / n_bins * u.s
else:
time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])
if time_bin_start.isscalar:
if time_bin_size is not None:
if time_bin_size.isscalar:
# Determine the number of bins
if n_bins is None:
bin_size_sec = time_bin_size.to_value(u.s)
n_bins = int(np.ceil(time_duration/bin_size_sec))
elif time_bin_end is not None:
if not time_bin_end.isscalar:
# Convert start time to an array and populate using `time_bin_end`
scalar_start_time = time_bin_start
time_bin_start = time_bin_end.replicate(copy=True)
time_bin_start[0] = scalar_start_time
time_bin_start[1:] = time_bin_end[:-1]
# Check for overlapping bins, and warn if they are present
if time_bin_end is not None:
if (not time_bin_end.isscalar and not time_bin_start.isscalar and
np.any(time_bin_start[1:] < time_bin_end[:-1])):
warnings.warn("Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning.",
AstropyUserWarning)
binned = BinnedTimeSeries(time_bin_size=time_bin_size,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
n_bins=n_bins)
if aggregate_func is None:
aggregate_func = np.nanmean
# Start and end times of the binned timeseries
bin_start = binned.time_bin_start
bin_end = binned.time_bin_end
# Set `n_bins` to match the length of `time_bin_start` if
# `n_bins` is unspecified or if `time_bin_start` is an iterable
if n_bins is None or not time_bin_start.isscalar:
n_bins = len(bin_start)
# Find the subset of the table that is inside the union of all bins
keep = ((ts_sorted.time >= bin_start[0]) & (ts_sorted.time <= bin_end[-1]))
# Find out indices to be removed because of uncontiguous bins
for ind in range(n_bins-1):
delete_indices = np.where(np.logical_and(ts_sorted.time > bin_end[ind],
ts_sorted.time < bin_start[ind+1]))
keep[delete_indices] = False
subset = ts_sorted[keep]
# Figure out which bin each row falls in by sorting with respect
# to the bin end times
indices = np.searchsorted(bin_end, ts_sorted.time[keep])
# For time == bin_start[i+1] == bin_end[i], let bin_start take precedence
if len(indices) and np.all(bin_start[1:] >= bin_end[:-1]):
indices_start = np.searchsorted(subset.time, bin_start[bin_start <= ts_sorted.time[-1]])
indices[indices_start] = np.arange(len(indices_start))
# Determine rows where values are defined
if len(indices):
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
else:
groups = np.array([])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
for colname in subset.colnames:
if colname == 'time':
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn("Skipping column {0} since it has a mix-in type", AstropyUserWarning)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(reduceat(values.value, groups, aggregate_func),
values.unit, copy=False)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
|
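A short usage sketch for the function documented above, assuming astropy is installed; the time series values are illustrative.

```python
import astropy.units as u
from astropy.timeseries import TimeSeries, aggregate_downsample

# Six evenly spaced samples, one second apart.
ts = TimeSeries(time_start="2016-03-22T12:30:31",
                time_delta=1 * u.s,
                data={"flux": [1., 4., 5., 3., 2., 3.] * u.mJy})

# Combine samples into 2-second bins with the default np.nanmean aggregator.
binned = aggregate_downsample(ts, time_bin_size=2 * u.s)
print(binned)
```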
50,485 |
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if options.use_canonical_paths:
canonical_objdir = os.path.realpath(options.objdir)
if canonical_objdir != options.objdir:
options.objdir = canonical_objdir
logger.msg(f"--object-directory has been normalized to {options.objdir}.")
options.starting_dir = os.path.abspath(os.getcwd())
if options.use_canonical_paths:
canonical_starting_dir = os.path.realpath(options.starting_dir)
if canonical_starting_dir != options.starting_dir:
options.starting_dir = canonical_starting_dir
logger.msg(f"starting_dir has been normalized to {options.starting_dir}.")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if options.use_canonical_paths:
canonical_root = os.path.realpath(options.root)
if canonical_root != options.root:
options.root = canonical_root
logger.msg(f"--root has been normalized to {options.root}.")
canonical_rootdir = os.path.realpath(options.root_dir)
if canonical_rootdir != options.root_dir:
options.root_dir = canonical_rootdir
logger.msg(f"root_dir has been normalized to {options.root_dir}.")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if options.use_canonical_paths:
canonical_objdir = os.path.realpath(options.objdir)
if canonical_objdir != options.objdir:
options.objdir = canonical_objdir
logger.msg(f"--object-directory has been normalized to {options.objdir}.")
options.starting_dir = os.path.abspath(os.getcwd())
if canonical_path:
options.starting_dir = canonical_path(options.starting_dir, "starting dir")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if options.use_canonical_paths:
canonical_root = os.path.realpath(options.root)
if canonical_root != options.root:
options.root = canonical_root
logger.msg(f"--root has been normalized to {options.root}.")
canonical_rootdir = os.path.realpath(options.root_dir)
if canonical_rootdir != options.root_dir:
options.root_dir = canonical_rootdir
logger.msg(f"root_dir has been normalized to {options.root_dir}.")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
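The modified version above calls a `canonical_path` helper that is not shown in this row: it is first truth-tested, then called with a path and a description. A hypothetical sketch of such a helper, inferred only from that call site and labeled as an assumption rather than gcovr's actual implementation:

```python
import os

def build_canonical_path(logger, enabled):
    """Return a path normalizer, or None when --use_canonical_paths is off."""
    if not enabled:
        return None

    def canonical_path(path, description):
        # Resolve symlinks and relative segments; report when the value changed.
        real = os.path.realpath(path)
        if real != path:
            logger.msg(f"{description} has been normalized to {real}.")
        return real

    return canonical_path

# Hypothetical wiring, mirroring the call site in the modified snippet:
# canonical_path = build_canonical_path(logger, options.use_canonical_paths)
# if canonical_path:
#     options.starting_dir = canonical_path(options.starting_dir, "starting dir")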
57,594 |
def main():
parser = SanicArgumentParser(
prog="sanic",
description=BASE_LOGO,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument(
"-H",
"--host",
dest="host",
type=str,
default="127.0.0.1",
help="host address [default 127.0.0.1]",
)
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=8000,
help="port to serve on [default 8000]",
)
parser.add_argument(
"-u",
"--unix",
dest="unix",
type=str,
default="",
help="location of unix socket",
)
parser.add_argument(
"--cert", dest="cert", type=str, help="location of certificate for SSL"
)
parser.add_argument(
"--key", dest="key", type=str, help="location of keyfile for SSL."
)
parser.add_argument(
"-w",
"--workers",
dest="workers",
type=int,
default=1,
help="number of worker processes [default 1]",
)
parser.add_argument("-d", "--debug", dest="debug", action="store_true")
parser.add_argument(
"-r",
"--auto-reload",
dest="auto_reload",
action="store_true",
help="Watch source directory for file changes and reload on changes",
)
parser.add_argument(
"--factory",
action="store_true",
help="Treat app as an application factory, i.e. a () -> <Sanic app> callable.",
)
parser.add_argument(
"-v",
"--version",
action="version",
version=f"Sanic {__version__}; Routing {__routing_version__}",
)
parser.add_bool_arguments(
"--access-logs", dest="access_log", help="display access logs"
)
parser.add_argument(
"module", help="path to your Sanic app. Example: path.to.server:app"
)
args = parser.parse_args()
try:
module_path = os.path.abspath(os.getcwd())
if module_path not in sys.path:
sys.path.append(module_path)
delimiter = ":" if ":" in args.module else "."
module_name, app_name = args.module.rsplit(delimiter, 1)
module = import_module(module_name)
app = getattr(module, app_name, None)
if args.factory:
app = app()
app_name = type(app).__name__
if not isinstance(app, Sanic):
raise ValueError(
f"Module is not a Sanic app, it is a {app_name}. "
f"Perhaps you meant {args.module}.app?"
)
if args.cert is not None or args.key is not None:
ssl: Optional[Dict[str, Any]] = {
"cert": args.cert,
"key": args.key,
}
else:
ssl = None
kwargs = {
"host": args.host,
"port": args.port,
"unix": args.unix,
"workers": args.workers,
"debug": args.debug,
"access_log": args.access_log,
"ssl": ssl,
}
if args.auto_reload:
kwargs["auto_reload"] = True
app.run(**kwargs)
except ImportError as e:
if module_name.startswith(e.name):
error_logger.error(
f"No module named {e.name} found.\n"
" Example File: project/sanic_server.py -> app\n"
" Example Module: project.sanic_server.app"
)
else:
raise e
except ValueError:
error_logger.exception("Failed to run app")
|
def main():
parser = SanicArgumentParser(
prog="sanic",
description=BASE_LOGO,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument(
"-H",
"--host",
dest="host",
type=str,
default="127.0.0.1",
help="host address [default 127.0.0.1]",
)
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=8000,
help="port to serve on [default 8000]",
)
parser.add_argument(
"-u",
"--unix",
dest="unix",
type=str,
default="",
help="location of unix socket",
)
parser.add_argument(
"--cert", dest="cert", type=str, help="location of certificate for SSL"
)
parser.add_argument(
"--key", dest="key", type=str, help="location of keyfile for SSL."
)
parser.add_argument(
"-w",
"--workers",
dest="workers",
type=int,
default=1,
help="number of worker processes [default 1]",
)
parser.add_argument("-d", "--debug", dest="debug", action="store_true")
parser.add_argument(
"-r",
"--auto-reload",
dest="auto_reload",
action="store_true",
help="Watch source directory for file changes and reload on changes",
)
parser.add_argument(
"--factory",
action="store_true",
help=(
"Treat app as an application factory, "
"i.e. a () -> <Sanic app> callable."
),
)
parser.add_argument(
"-v",
"--version",
action="version",
version=f"Sanic {__version__}; Routing {__routing_version__}",
)
parser.add_bool_arguments(
"--access-logs", dest="access_log", help="display access logs"
)
parser.add_argument(
"module", help="path to your Sanic app. Example: path.to.server:app"
)
args = parser.parse_args()
try:
module_path = os.path.abspath(os.getcwd())
if module_path not in sys.path:
sys.path.append(module_path)
delimiter = ":" if ":" in args.module else "."
module_name, app_name = args.module.rsplit(delimiter, 1)
module = import_module(module_name)
app = getattr(module, app_name, None)
if args.factory:
app = app()
app_name = type(app).__name__
if not isinstance(app, Sanic):
raise ValueError(
f"Module is not a Sanic app, it is a {app_name}. "
f"Perhaps you meant {args.module}.app?"
)
if args.cert is not None or args.key is not None:
ssl: Optional[Dict[str, Any]] = {
"cert": args.cert,
"key": args.key,
}
else:
ssl = None
kwargs = {
"host": args.host,
"port": args.port,
"unix": args.unix,
"workers": args.workers,
"debug": args.debug,
"access_log": args.access_log,
"ssl": ssl,
}
if args.auto_reload:
kwargs["auto_reload"] = True
app.run(**kwargs)
except ImportError as e:
if module_name.startswith(e.name):
error_logger.error(
f"No module named {e.name} found.\n"
" Example File: project/sanic_server.py -> app\n"
" Example Module: project.sanic_server.app"
)
else:
raise e
except ValueError:
error_logger.exception("Failed to run app")
|
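The `--factory` flag documented above expects the CLI target to be a zero-argument callable that returns a `Sanic` instance. An illustrative factory module; the module, app, and handler names are examples, not part of the snippet:

```python
from sanic import Sanic
from sanic.response import text

def create_app() -> Sanic:
    app = Sanic("ExampleApp")

    @app.route("/")
    async def index(request):
        return text("hello")

    return app

# Launched roughly as:
#   sanic path.to.this_module:create_app --factory
```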
33,021 |
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values may be strings, lists of strings, or None if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initiate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features wihout these keys, not matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time without key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values may be strings, lists of strings, or None if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initiate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features without these keys, no matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time without key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
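A usage sketch for the query builder above; the coordinates and tags are illustrative, the function is assumed to be in scope, and the exact Overpass QL text depends on the templates defined in the function.

```python
# All cafes and restaurants inside a small bounding box, plus every feature
# carrying any 'shop' tag.
query = parse_poi_query(
    north=40.77, south=40.76, east=-73.96, west=-73.97,
    tags={"amenity": ["cafe", "restaurant"], "shop": True},
)
print(query)  # Overpass QL string, ready to send to the Overpass API
```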
29,745 |
def _silent_request(*args, **kwargs):
"""
Attempt to make Slack API request and ignore if an exception is thrown.
TODO(jrbotros): this silences all errors, but we likely will want to be
able to surface errors in some cases in the future
"""
try:
return _request(*args, **kwargs)
except SlackError:
logger.exception('Slack API Error')
except HTTPError as e:
status_code = e.response.status_code
if status_code == 429:
logger.exception('HTTPError: {}'.format(e))
else:
raise(e)
|
def _silent_request(*args, **kwargs):
"""
Attempt to make Slack API request and ignore if an exception is thrown.
TODO(jrbotros): this silences all errors, but we likely will want to be
able to surface errors in some cases in the future
"""
try:
return _request(*args, **kwargs)
except SlackError:
logger.exception('Slack API Error')
except HTTPError as e:
status_code = e.response.status_code
if status_code == 429:
logger.exception('HTTPError: {}'.format(e))
else:
raise
|
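The only change in this row is `raise(e)` becoming a bare `raise`. A standalone illustration, not project code: inside an `except` block a bare `raise` re-raises the exception currently being handled, so no name binding is needed, while `raise(e)` simply parses as `raise e` with redundant parentheses.

```python
def parse_or_reraise(text):
    try:
        return int(text)
    except ValueError:
        # Decide we cannot handle it here and propagate the original exception.
        raise  # re-raises the active exception; no variable needed

try:
    parse_or_reraise("not a number")
except ValueError as err:
    print("caught again:", err)
```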
10,763 |
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map,
func_ir, typemap):
"""create a dummy function from parfor and call remove dead recursively
"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
"""
Previously, this statement used lives_n_aliases. That had the effect of
keeping variables in the init_block alive if they aliased an array that
was later written to. By using just lives to indicate which variables
names are live at exit of the parfor but then using alias_map for the
actual recursive dead code removal, we keep any writes to aliased arrays
alive but also allow aliasing assignments (i.e., a = b) to be eliminated
so long as 'a' is not written to through the variable 'a' later on.
This makes assignment handling of remove_dead_block work properly since
it allows distinguishing between live variables and their aliases.
"""
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# branch back to first body label to simulate loop
scope = blocks[last_label].scope
branchcond = ir.Var(scope, mk_unique_var("$branchcond"), ir.Loc("parfors_dummy", -1))
typemap[branchcond.name] = types.boolean
branch = ir.Branch(branchcond, first_body_block, return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(branch)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, ir.Loc("parfors_dummy", -1)))
# args var including aliases is ok
remove_dead(blocks, arg_aliases, func_ir, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
|
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map,
func_ir, typemap):
"""create a dummy function from parfor and call remove dead recursively
"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
"""
Previously, this statement used lives_n_aliases. That had the effect of
keeping variables in the init_block alive if they aliased an array that
was later written to. By using just lives to indicate which variables
names are live at exit of the parfor but then using alias_map for the
actual recursive dead code removal, we keep any writes to aliased arrays
alive but also allow aliasing assignments (i.e., a = b) to be eliminated
so long as 'a' is not written to through the variable 'b' later on.
This makes assignment handling of remove_dead_block work properly since
it allows distinguishing between live variables and their aliases.
"""
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# branch back to first body label to simulate loop
scope = blocks[last_label].scope
branchcond = ir.Var(scope, mk_unique_var("$branchcond"), ir.Loc("parfors_dummy", -1))
typemap[branchcond.name] = types.boolean
branch = ir.Branch(branchcond, first_body_block, return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(branch)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, ir.Loc("parfors_dummy", -1)))
# args var including aliases is ok
remove_dead(blocks, arg_aliases, func_ir, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
|
32,380 |
def fetch_incidents(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = demisto.params().get('first_fetch')
attribute_key = demisto.params().get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = demisto.params().get('AttributeValue')
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(**kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.incidents(incidents)
|
def fetch_incidents(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = demisto.params().get('first_fetch')
attribute_key = demisto.params().get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = params.get('AttributeValue')
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(**kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.incidents(incidents)
|
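The fetch loop above is built on boto3's CloudTrail `lookup_events` paginator. A minimal standalone sketch of that pattern, assuming AWS credentials are configured; the region and attribute values are illustrative.

```python
from datetime import datetime, timedelta

import boto3

client = boto3.client("cloudtrail", region_name="us-east-1")
paginator = client.get_paginator("lookup_events")
pages = paginator.paginate(
    LookupAttributes=[{"AttributeKey": "EventName",
                       "AttributeValue": "ConsoleLogin"}],
    StartTime=datetime.utcnow() - timedelta(days=1),
)
for page in pages:
    for event in page["Events"]:
        print(event["EventId"], event.get("EventName"), event.get("EventTime"))
```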
32,846 |
def test_custom_writer():
tracer = ddtrace.Tracer()
class CustomWriter(TraceWriter):
def recreate(self):
# type: () -> TraceWriter
return self
def stop(self, timeout=None):
# type: (Optional[float]) -> None
pass
def write(self, spans=None):
# type: (Optional[List[Span]]) -> None
pass
tracer.writer = CustomWriter()
info = debug.collect(tracer)
assert info.get("agent_url", "CUSTOM")
|
def test_custom_writer():
tracer = ddtrace.Tracer()
class CustomWriter(TraceWriter):
def recreate(self):
# type: () -> TraceWriter
return self
def stop(self, timeout=None):
# type: (Optional[float]) -> None
pass
def write(self, spans=None):
# type: (Optional[List[Span]]) -> None
pass
tracer.writer = CustomWriter()
info = debug.collect(tracer)
assert info.get("agent_url", None) == "CUSTOM"
|
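The fix in this row replaces an assertion that could never fail. A standalone illustration, not project code, of why: `dict.get(key, default)` returns the default when the key is missing, so asserting on a truthy default always passes, whereas comparing against an expected value actually exercises the lookup.

```python
info = {}

# Always true: the key is missing, so get() returns the truthy default.
assert info.get("agent_url", "CUSTOM")

# Meaningful checks: compare the looked-up value against what is expected.
assert info.get("agent_url", None) is None
info["agent_url"] = "CUSTOM"
assert info.get("agent_url", None) == "CUSTOM"
```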
55,340 |
def apply_to_dataframe(
data: pd.DataFrame,
metric_functions: Dict[str, AnnotatedMetricFunction]) -> pd.Series:
"""Apply metric functions to a DataFrame."""
values = dict()
for name, mf in metric_functions.items():
values[name] = mf.invoke(data)
result = pd.Series(data=values.values(), index=values.keys())
return result
|
def apply_to_dataframe(
data: pd.DataFrame,
metric_functions: Dict[str, AnnotatedMetricFunction]) -> pd.Series:
"""Apply metric functions to a DataFrame."""
values = dict()
for name, func in metric_functions.items():
values[name] = func.invoke(data)
result = pd.Series(data=values.values(), index=values.keys())
return result
|
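A minimal sketch of the pattern above with plain callables standing in for `AnnotatedMetricFunction`, which is not defined in this row; the column names and metrics are illustrative.

```python
import pandas as pd

data = pd.DataFrame({"y_true": [1, 0, 1, 1], "y_pred": [1, 1, 1, 0]})
metric_functions = {
    "accuracy": lambda df: float((df["y_true"] == df["y_pred"]).mean()),
    "predicted_positives": lambda df: int(df["y_pred"].sum()),
}

# Apply each metric to the frame and collect the results into a Series,
# mirroring apply_to_dataframe above.
values = {name: func(data) for name, func in metric_functions.items()}
result = pd.Series(data=values.values(), index=values.keys())
print(result)
```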
3,686 |
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create a (read-only) record array from binary data contained in
a string
Parameters
----------
datastring : str
Binary data contained in a string
dtype : data-type, optional
Valid dtype for all arrays
shape : int or tuple of ints, optional
Shape of each array.
offset : int, optional
Position in the file to start reading from.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
Returns
-------
np.recarray
record array consisting of data in datastring.
Examples
--------
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(b[5])
(0.5, 10, 'abcde')
>>> b.shape
(10,)
"""
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape in (None, -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
|
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create a (read-only) record array from binary data contained in
a string
Parameters
----------
datastring : str
Binary data contained in a string
dtype : data-type, optional
Valid dtype for all arrays
shape : int or tuple of ints, optional
Shape of each array.
offset : int, optional
Position in the buffer to start reading from.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
Returns
-------
np.recarray
record array consisting of data in datastring.
Examples
--------
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>> b=np.core.records.fromstring(a.tostring(), formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(b[5])
(0.5, 10, 'abcde')
>>> b.shape
(10,)
"""
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape in (None, -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
|
7,466 |
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
compatibility_mode=False):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py:File` or :class:`h5py:Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to __astropy__
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
path = '__astropy__'
warnings.warn("table path was not set via the path= argument; "
"using default path __astropy__")
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
compatibility_mode=compatibility_mode)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
else:
raise OSError("Table {0} already exists".format(path))
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression)
else:
dset = output_group.create_dataset(name, data=table.as_array())
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = [h.encode('utf-8') for h in header_yaml]
if compatibility_mode:
warnings.warn("compatibility mode for writing is deprecated",
AstropyDeprecationWarning)
try:
dset.attrs[META_KEY] = header_encoded
except Exception as e:
warnings.warn(
"Attributes could not be written to the output HDF5 "
"file: {0}".format(e))
else:
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{0}` of type {1} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
|
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
compatibility_mode=False):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py:File` or :class:`h5py:Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to __astropy__
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
path = '__astropy__'
warnings.warn("table path was not set via the path= argument; "
"using default path {}".format(path))
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
compatibility_mode=compatibility_mode)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
else:
raise OSError("Table {0} already exists".format(path))
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression)
else:
dset = output_group.create_dataset(name, data=table.as_array())
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = [h.encode('utf-8') for h in header_yaml]
if compatibility_mode:
warnings.warn("compatibility mode for writing is deprecated",
AstropyDeprecationWarning)
try:
dset.attrs[META_KEY] = header_encoded
except Exception as e:
warnings.warn(
"Attributes could not be written to the output HDF5 "
"file: {0}".format(e))
else:
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{0}` of type {1} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
|
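The pair above centers on write_table_hdf5, which astropy normally invokes through the Table I/O registry rather than directly. A minimal usage sketch, assuming astropy and h5py are installed and using a hypothetical filename, shows the serialize_meta behaviour the warning branch refers to.
# Minimal sketch: writing an astropy Table to HDF5 through the I/O registry.
# Assumes astropy and h5py are installed; 'example.hdf5' is a hypothetical filename.
from astropy.table import Table

t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
t['b'].unit = 'm'  # column info that is dropped unless serialize_meta=True
t.write('example.hdf5', path='data', format='hdf5',
        serialize_meta=True, overwrite=True)
t2 = Table.read('example.hdf5', path='data', format='hdf5')
print(t2['b'].unit)  # the unit survives the round trip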
5,375 |
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", ' 'specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be" " added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be " "updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", ' 'specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be " "updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
3,315 |
def fix_spans(event_json, old_span_id):
"""
This function will uupdate the span IDs and timstampes for a transaction
event
"""
trace = event_json["contexts"]["trace"]
new_span_id = trace["span_id"]
trace_id = trace["trace_id"]
update_id_map = {old_span_id: new_span_id}
spans = event_json.get("spans", [])
full_duration = (event_json["timestamp"] - event_json["start_timestamp"]).total_seconds()
while True:
found_any = False
for span in spans:
new_parent_id = update_id_map.get(span["parent_span_id"])
if new_parent_id:
# set the new parent
span["parent_span_id"] = new_parent_id
                # generate a new id and set the replacement mapping
new_id = uuid4().hex[:16]
update_id_map[span["span_id"]] = new_id
                # update the span
span["span_id"] = new_id
found_any = True
# quit if we didn't make any updates
if not found_any:
break
# now update every trace id
for span in spans:
span["trace_id"] = trace_id
# create a tree of children and a hashmap of the span by the ID
tree = defaultdict(list)
id_map = {}
for span in spans:
tree[span["parent_span_id"]].append(span)
id_map[span["span_id"]] = span
id_list = [new_span_id]
while id_list:
span_id = id_list.pop()
children = tree.get(span_id, [])
# figure out the offset of the parent span and the end time of the span
if span_id == new_span_id:
span_offset = 0
parent_duration = full_duration
end_of_parent_span = full_duration
else:
parent_span = id_map[span_id]
span_offset = parent_span["data"]["offset"]
parent_duration = parent_span["data"]["duration"]
# end time of the parent span is the offset + duration
end_of_parent_span = span_offset + parent_duration
num_children = len(children)
avg_span_length = parent_duration / max(num_children, 1)
# order each span with the same parent sequentially in time
for i, span in enumerate(children):
if "data" not in span:
span["data"] = {}
span["data"]["offset"] = span_offset
remaining_time = end_of_parent_span - span_offset
# if we are the last child of a span, then
last_index = num_children - 1
if i == last_index:
duration = remaining_time
else:
max_duration = remaining_time - (avg_span_length / 4.0) * (last_index - i)
# pick a random length for the span that's at most 2x the average span length
duration = min(max_duration, random.uniform(0, 2 * avg_span_length))
span["data"]["duration"] = duration
span_offset = duration + span_offset
id_list.append(span["span_id"])
|
def fix_spans(event_json, old_span_id):
"""
This function will update the span IDs and timestamps for a transaction
event
"""
trace = event_json["contexts"]["trace"]
new_span_id = trace["span_id"]
trace_id = trace["trace_id"]
update_id_map = {old_span_id: new_span_id}
spans = event_json.get("spans", [])
full_duration = (event_json["timestamp"] - event_json["start_timestamp"]).total_seconds()
while True:
found_any = False
for span in spans:
new_parent_id = update_id_map.get(span["parent_span_id"])
if new_parent_id:
# set the new parent
span["parent_span_id"] = new_parent_id
                # generate a new id and set the replacement mapping
new_id = uuid4().hex[:16]
update_id_map[span["span_id"]] = new_id
                # update the span
span["span_id"] = new_id
found_any = True
# quit if we didn't make any updates
if not found_any:
break
# now update every trace id
for span in spans:
span["trace_id"] = trace_id
# create a tree of children and a hashmap of the span by the ID
tree = defaultdict(list)
id_map = {}
for span in spans:
tree[span["parent_span_id"]].append(span)
id_map[span["span_id"]] = span
id_list = [new_span_id]
while id_list:
span_id = id_list.pop()
children = tree.get(span_id, [])
# figure out the offset of the parent span and the end time of the span
if span_id == new_span_id:
span_offset = 0
parent_duration = full_duration
end_of_parent_span = full_duration
else:
parent_span = id_map[span_id]
span_offset = parent_span["data"]["offset"]
parent_duration = parent_span["data"]["duration"]
# end time of the parent span is the offset + duration
end_of_parent_span = span_offset + parent_duration
num_children = len(children)
avg_span_length = parent_duration / max(num_children, 1)
# order each span with the same parent sequentially in time
for i, span in enumerate(children):
if "data" not in span:
span["data"] = {}
span["data"]["offset"] = span_offset
remaining_time = end_of_parent_span - span_offset
# if we are the last child of a span, then
last_index = num_children - 1
if i == last_index:
duration = remaining_time
else:
max_duration = remaining_time - (avg_span_length / 4.0) * (last_index - i)
# pick a random length for the span that's at most 2x the average span length
duration = min(max_duration, random.uniform(0, 2 * avg_span_length))
span["data"]["duration"] = duration
span_offset = duration + span_offset
id_list.append(span["span_id"])
|
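The remapping loop in fix_spans hinges on a dictionary from old span IDs to freshly generated ones, applied until no parent reference changes. A self-contained sketch with made-up span data illustrates just that pass.
# Sketch of the parent-id remapping pass from fix_spans, on hypothetical spans.
from uuid import uuid4

spans = [
    {"span_id": "aaa", "parent_span_id": "root"},
    {"span_id": "bbb", "parent_span_id": "aaa"},
]
update_id_map = {"root": uuid4().hex[:16]}  # old root span id -> new root span id
while True:
    found_any = False
    for span in spans:
        new_parent_id = update_id_map.get(span["parent_span_id"])
        if new_parent_id:
            span["parent_span_id"] = new_parent_id
            new_id = uuid4().hex[:16]
            update_id_map[span["span_id"]] = new_id
            span["span_id"] = new_id
            found_any = True
    if not found_any:
        break
print(spans)  # both spans now carry fresh ids, with parent links preserved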
49,580 |
def read_pandas(
reader,
urlpath,
blocksize="default",
lineterminator=None,
compression="infer",
sample=256000,
sample_rows=10,
enforce=False,
assume_missing=False,
storage_options=None,
include_path_column=False,
**kwargs,
):
reader_name = reader.__name__
if lineterminator is not None and len(lineterminator) == 1:
kwargs["lineterminator"] = lineterminator
else:
lineterminator = "\n"
if include_path_column and isinstance(include_path_column, bool):
include_path_column = "path"
if "index" in kwargs or "index_col" in kwargs:
raise ValueError(
"Keywords 'index' and 'index_col' not supported. "
"Use dd.{0}(...).set_index('my-index') "
"instead".format(reader_name)
)
for kw in ["iterator", "chunksize"]:
if kw in kwargs:
raise ValueError("{0} not supported for dd.{1}".format(kw, reader_name))
if kwargs.get("nrows", None):
raise ValueError(
"The 'nrows' keyword is not supported by "
"`dd.{0}`. To achieve the same behavior, it's "
"recommended to use `dd.{0}(...)."
"head(n=nrows)`".format(reader_name)
)
if isinstance(kwargs.get("skiprows"), int):
skiprows = lastskiprow = firstrow = kwargs.get("skiprows")
elif kwargs.get("skiprows") is None:
skiprows = lastskiprow = firstrow = 0
else:
# When skiprows is a list, we expect more than max(skiprows) to
# be included in the sample. This means that [0,2] will work well,
# but [0, 440] might not work.
skiprows = set(kwargs.get("skiprows"))
lastskiprow = max(skiprows)
# find the firstrow that is not skipped, for use as header
firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))
if isinstance(kwargs.get("header"), list):
raise TypeError(
"List of header rows not supported for dd.{0}".format(reader_name)
)
if isinstance(kwargs.get("converters"), dict) and include_path_column:
path_converter = kwargs.get("converters").get(include_path_column, None)
else:
path_converter = None
# If compression is "infer", inspect the (first) path suffix and
    # set the proper compression option if the suffix is recognized.
if compression == "infer":
# Translate the input urlpath to a simple path list
paths = get_fs_token_paths(urlpath, mode="rb", storage_options=storage_options)[
2
]
# Infer compression from first path
compression = infer_compression(paths[0])
if blocksize == "default":
blocksize = AUTO_BLOCKSIZE
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
if blocksize and compression:
# NONE of the compressions should use chunking
warn(
"Warning %s compression does not support breaking apart files\n"
"Please ensure that each individual file can fit in memory and\n"
"use the keyword ``blocksize=None to remove this message``\n"
"Setting ``blocksize=None``" % compression
)
blocksize = None
if compression not in compr:
raise NotImplementedError("Compression format %s not installed" % compression)
if blocksize and sample and blocksize < sample and lastskiprow != 0:
warn(
"Unexpected behavior can result from passing skiprows when\n"
"blocksize is smaller than sample size.\n"
"Setting ``sample=blocksize``"
)
sample = blocksize
b_lineterminator = lineterminator.encode()
b_out = read_bytes(
urlpath,
delimiter=b_lineterminator,
blocksize=blocksize,
sample=sample,
compression=compression,
include_path=include_path_column,
**(storage_options or {}),
)
if include_path_column:
b_sample, values, paths = b_out
path = (include_path_column, path_converter)
else:
b_sample, values = b_out
path = None
if not isinstance(values[0], (tuple, list)):
values = [values]
# If we have not sampled, then use the first row of the first values
# as a representative sample.
if b_sample is False and len(values[0]):
b_sample = values[0][0].compute()
# Get header row, and check that sample is long enough. If the file
# contains a header row, we need at least 2 nonempty rows + the number of
# rows to skip.
names = kwargs.get("names", None)
header = kwargs.get("header", "infer" if names is None else None)
need = 1 if header is None else 2
parts = b_sample.split(b_lineterminator, lastskiprow + need)
# If the last partition is empty, don't count it
nparts = 0 if not parts else len(parts) - int(not parts[-1])
if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:
raise ValueError(
"Sample is not large enough to include at least one "
"row of data. Please increase the number of bytes "
"in `sample` in the call to `read_csv`/`read_table`"
)
header = b"" if header is None else parts[firstrow] + b_lineterminator
# Use sample to infer dtypes and check for presence of include_path_column
head_kwargs = kwargs.copy()
if sample_rows is not None:
head_kwargs["nrows"] = sample_rows
try:
head = reader(BytesIO(b_sample), **head_kwargs)
except pd.errors.ParserError as e:
if "EOF" in str(e):
raise ValueError(
"EOF encountered while reading header. \n"
"Pass argument `sample_rows=` and make sure the value of `sample` "
"is large enough to accommodate that may rows of data"
) from e
raise
if include_path_column and (include_path_column in head.columns):
raise ValueError(
"Files already contain the column name: %s, so the "
"path column cannot use this name. Please set "
"`include_path_column` to a unique name." % include_path_column
)
specified_dtypes = kwargs.get("dtype", {})
if specified_dtypes is None:
specified_dtypes = {}
# If specified_dtypes is a single type, then all columns were specified
if assume_missing and isinstance(specified_dtypes, dict):
# Convert all non-specified integer columns to floats
for c in head.columns:
if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:
head[c] = head[c].astype(float)
values = [[list(dsk.dask.values()) for dsk in block] for block in values]
return text_blocks_to_pandas(
reader,
values,
header,
head,
kwargs,
enforce=enforce,
specified_dtypes=specified_dtypes,
path=path,
blocksize=blocksize,
)
|
def read_pandas(
reader,
urlpath,
blocksize="default",
lineterminator=None,
compression="infer",
sample=256000,
sample_rows=10,
enforce=False,
assume_missing=False,
storage_options=None,
include_path_column=False,
**kwargs,
):
reader_name = reader.__name__
if lineterminator is not None and len(lineterminator) == 1:
kwargs["lineterminator"] = lineterminator
else:
lineterminator = "\n"
if include_path_column and isinstance(include_path_column, bool):
include_path_column = "path"
if "index" in kwargs or "index_col" in kwargs:
raise ValueError(
"Keywords 'index' and 'index_col' not supported. "
"Use dd.{0}(...).set_index('my-index') "
"instead".format(reader_name)
)
for kw in ["iterator", "chunksize"]:
if kw in kwargs:
raise ValueError("{0} not supported for dd.{1}".format(kw, reader_name))
if kwargs.get("nrows", None):
raise ValueError(
"The 'nrows' keyword is not supported by "
"`dd.{0}`. To achieve the same behavior, it's "
"recommended to use `dd.{0}(...)."
"head(n=nrows)`".format(reader_name)
)
if isinstance(kwargs.get("skiprows"), int):
skiprows = lastskiprow = firstrow = kwargs.get("skiprows")
elif kwargs.get("skiprows") is None:
skiprows = lastskiprow = firstrow = 0
else:
# When skiprows is a list, we expect more than max(skiprows) to
# be included in the sample. This means that [0,2] will work well,
# but [0, 440] might not work.
skiprows = set(kwargs.get("skiprows"))
lastskiprow = max(skiprows)
# find the firstrow that is not skipped, for use as header
firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))
if isinstance(kwargs.get("header"), list):
raise TypeError(
"List of header rows not supported for dd.{0}".format(reader_name)
)
if isinstance(kwargs.get("converters"), dict) and include_path_column:
path_converter = kwargs.get("converters").get(include_path_column, None)
else:
path_converter = None
# If compression is "infer", inspect the (first) path suffix and
    # set the proper compression option if the suffix is recognized.
if compression == "infer":
# Translate the input urlpath to a simple path list
paths = get_fs_token_paths(urlpath, mode="rb", storage_options=storage_options)[
2
]
# Infer compression from first path
compression = infer_compression(paths[0])
if blocksize == "default":
blocksize = AUTO_BLOCKSIZE
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
if blocksize and compression:
# NONE of the compressions should use chunking
warn(
"Warning %s compression does not support breaking apart files\n"
"Please ensure that each individual file can fit in memory and\n"
"use the keyword ``blocksize=None to remove this message``\n"
"Setting ``blocksize=None``" % compression
)
blocksize = None
if compression not in compr:
raise NotImplementedError("Compression format %s not installed" % compression)
if blocksize and sample and blocksize < sample and lastskiprow != 0:
warn(
"Unexpected behavior can result from passing skiprows when\n"
"blocksize is smaller than sample size.\n"
"Setting ``sample=blocksize``"
)
sample = blocksize
b_lineterminator = lineterminator.encode()
b_out = read_bytes(
urlpath,
delimiter=b_lineterminator,
blocksize=blocksize,
sample=sample,
compression=compression,
include_path=include_path_column,
**(storage_options or {}),
)
if include_path_column:
b_sample, values, paths = b_out
path = (include_path_column, path_converter)
else:
b_sample, values = b_out
path = None
if not isinstance(values[0], (tuple, list)):
values = [values]
# If we have not sampled, then use the first row of the first values
# as a representative sample.
if b_sample is False and len(values[0]):
b_sample = values[0][0].compute()
# Get header row, and check that sample is long enough. If the file
# contains a header row, we need at least 2 nonempty rows + the number of
# rows to skip.
names = kwargs.get("names", None)
header = kwargs.get("header", "infer" if names is None else None)
need = 1 if header is None else 2
parts = b_sample.split(b_lineterminator, lastskiprow + need)
# If the last partition is empty, don't count it
nparts = 0 if not parts else len(parts) - int(not parts[-1])
if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:
raise ValueError(
"Sample is not large enough to include at least one "
"row of data. Please increase the number of bytes "
"in `sample` in the call to `read_csv`/`read_table`"
)
header = b"" if header is None else parts[firstrow] + b_lineterminator
# Use sample to infer dtypes and check for presence of include_path_column
try:
head = reader(BytesIO(b_sample), nrows=sample_rows, **kwargs)
except pd.errors.ParserError as e:
if "EOF" in str(e):
raise ValueError(
"EOF encountered while reading header. \n"
"Pass argument `sample_rows=` and make sure the value of `sample` "
"is large enough to accommodate that may rows of data"
) from e
raise
if include_path_column and (include_path_column in head.columns):
raise ValueError(
"Files already contain the column name: %s, so the "
"path column cannot use this name. Please set "
"`include_path_column` to a unique name." % include_path_column
)
specified_dtypes = kwargs.get("dtype", {})
if specified_dtypes is None:
specified_dtypes = {}
# If specified_dtypes is a single type, then all columns were specified
if assume_missing and isinstance(specified_dtypes, dict):
# Convert all non-specified integer columns to floats
for c in head.columns:
if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:
head[c] = head[c].astype(float)
values = [[list(dsk.dask.values()) for dsk in block] for block in values]
return text_blocks_to_pandas(
reader,
values,
header,
head,
kwargs,
enforce=enforce,
specified_dtypes=specified_dtypes,
path=path,
blocksize=blocksize,
)
|
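read_pandas is the engine behind dask.dataframe.read_csv and read_table; a hedged usage sketch of the public entry point (the glob path is hypothetical) shows the keywords the function above interprets.
# Sketch: the public entry point backed by read_pandas.
# Assumes dask[dataframe] is installed; 'data/*.csv' is a hypothetical glob.
import dask.dataframe as dd

df = dd.read_csv(
    "data/*.csv",
    blocksize="64MiB",          # split each file into ~64 MiB byte blocks
    sample=256000,              # bytes read up front for dtype inference
    sample_rows=10,             # rows of that sample actually parsed
    assume_missing=True,        # treat unspecified integer columns as floats
    include_path_column=True,   # add a 'path' column naming the source file
)
print(df.npartitions)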
7,165 |
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
tol=1e-4, prefilter=False):
"""Coarse to fine TV-L1 optical flow estimator.
    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
I0 : ~numpy.ndarray
The first gray scale image of the sequence.
I1 : ~numpy.ndarray
The second gray scale image of the sequence.
dt : float
Time step of the numerical scheme. Convergence is proved for
values dt < 0.125, but it can be larger for faster
convergence.
lambda_ : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
tau : float
Tightness parameter. It should have a small value in order to
maintain attachement and regularization parts in
correspondence.
nwarp : int
Number of times I1 is warped.
niter : int
        Number of fixed point iterations.
tol : float
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool
whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
flow : tuple[~numpy.ndarray]
The estimated optical flow.
References
----------
.. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
duality based approach for realtime TV-L 1 optical flow. In Joint
pattern recognition symposium (pp. 214-223). Springer, Berlin,
Heidelberg.
.. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
D. (2009). An improved algorithm for TV-L 1 optical flow. In
Statistical and geometrical approaches to visual motion analysis
(pp. 23-45). Springer, Berlin, Heidelberg.
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from skimage.registration import tvl1
>>> I0, I1, disp = stereo_motorcycle()
>>> # --- Convert the images to gray level: color is not supported.
>>> I0 = rgb2gray(I0)
>>> I1 = rgb2gray(I1)
>>> flow = tvl1(I1, I0)
"""
solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
nwarp=nwarp, niter=niter, tol=tol,
prefilter=prefilter)
return coarse_to_fine(I0, I1, solver)
|
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
tol=1e-4, prefilter=False):
"""Coarse to fine TV-L1 optical flow estimator.
    TV-L1 is a popular algorithm for optical flow estimation introduced
    by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
I0 : ~numpy.ndarray
The first gray scale image of the sequence.
I1 : ~numpy.ndarray
The second gray scale image of the sequence.
dt : float
Time step of the numerical scheme. Convergence is proved for
values dt < 0.125, but it can be larger for faster
convergence.
lambda_ : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
tau : float
Tightness parameter. It should have a small value in order to
maintain attachment and regularization parts in
correspondence.
nwarp : int
Number of times I1 is warped.
niter : int
        Number of fixed point iterations.
tol : float
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool
whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
flow : tuple[~numpy.ndarray]
The estimated optical flow.
References
----------
.. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
duality based approach for realtime TV-L 1 optical flow. In Joint
pattern recognition symposium (pp. 214-223). Springer, Berlin,
Heidelberg.
.. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
D. (2009). An improved algorithm for TV-L 1 optical flow. In
Statistical and geometrical approaches to visual motion analysis
(pp. 23-45). Springer, Berlin, Heidelberg.
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from skimage.registration import tvl1
>>> I0, I1, disp = stereo_motorcycle()
>>> # --- Convert the images to gray level: color is not supported.
>>> I0 = rgb2gray(I0)
>>> I1 = rgb2gray(I1)
>>> flow = tvl1(I1, I0)
"""
solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
nwarp=nwarp, niter=niter, tol=tol,
prefilter=prefilter)
return coarse_to_fine(I0, I1, solver)
|
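The estimated flow is typically used to warp the second image onto the first. A sketch continuing the docstring example, assuming the tvl1 function above is importable from skimage.registration as shown there:
# Sketch: warping I1 toward I0 with the estimated (v, u) displacement fields.
# Assumes scikit-image with the tvl1 function above; mirrors its docstring example.
import numpy as np
from skimage.color import rgb2gray
from skimage.data import stereo_motorcycle
from skimage.registration import tvl1
from skimage.transform import warp

I0, I1, disp = stereo_motorcycle()
I0, I1 = rgb2gray(I0), rgb2gray(I1)
v, u = tvl1(I0, I1)  # row and column displacements

rows, cols = I0.shape
row_coords, col_coords = np.meshgrid(np.arange(rows), np.arange(cols), indexing='ij')
I1_warped = warp(I1, np.array([row_coords + v, col_coords + u]), mode='edge')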
33,457 |
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="""Output filename, type dependent on suffix see can.LogReader.""",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="""Maximum file size in bytes. Rotate log file when size threshold is reached.""",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
pass
finally:
logger.stop()
|
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="Output filename, type dependent on suffix see can.LogReader.",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="""Maximum file size in bytes. Rotate log file when size threshold is reached.""",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
pass
finally:
logger.stop()
|
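The same conversion can be done programmatically with the python-can objects that main() wires together; a minimal sketch with hypothetical filenames:
# Sketch: converting a CAN log programmatically, mirroring main() above.
# Assumes python-can is installed; 'trace.asc' and 'trace.blf' are hypothetical files.
import can

reader = can.LogReader("trace.asc")
logger = can.Logger("trace.blf")
try:
    for msg in reader:
        logger(msg)
finally:
    logger.stop()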
30,979 |
def update_user_command(client, args):
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
parsed_old_scim = map_scim(old_scim)
parsed_new_scim = map_scim(new_scim)
user_id = parsed_old_scim.get('id')
if not user_id:
raise Exception('You must provide id of the user')
twic_extension = client.build_twic_extension(args, new_scim, CUSTOM_MAPPING_UPDATE,
parsed_new_scim.get('office_country'))
# building twic extension schema
if twic_extension:
new_scim[TWIC_EXTENSION_SCHEMA] = twic_extension
if SCIM_EXTENSION_SCHEMA in new_scim:
new_scim.pop(SCIM_EXTENSION_SCHEMA)
# Removing userName and emails from new_scim
if "userName" in new_scim:
new_scim.pop("userName")
if "emails" in new_scim:
new_scim.pop("emails")
res = client.update_user_profile(user_term=user_id, data=new_scim)
if res.status_code == 200:
res_json = res.json()
generic_iam_context = OutputContext(success=True,
iden=res_json.get('id'),
details=res_json,
active=res_json.get('active'))
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res.status_code,
errorMessage=res_json.get('detail'),
details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown(name='Update Twic User:',
t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username",
"email", "errorCode", "errorMessage", "details"],
removeNull=True)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def update_user_command(client, args):
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
parsed_old_scim = map_scim(old_scim)
parsed_new_scim = map_scim(new_scim)
user_id = parsed_old_scim.get('id')
if not user_id:
raise DemistoException('You must provide id of the user')
twic_extension = client.build_twic_extension(args, new_scim, CUSTOM_MAPPING_UPDATE,
parsed_new_scim.get('office_country'))
# building twic extension schema
if twic_extension:
new_scim[TWIC_EXTENSION_SCHEMA] = twic_extension
if SCIM_EXTENSION_SCHEMA in new_scim:
new_scim.pop(SCIM_EXTENSION_SCHEMA)
# Removing userName and emails from new_scim
if "userName" in new_scim:
new_scim.pop("userName")
if "emails" in new_scim:
new_scim.pop("emails")
res = client.update_user_profile(user_term=user_id, data=new_scim)
if res.status_code == 200:
res_json = res.json()
generic_iam_context = OutputContext(success=True,
iden=res_json.get('id'),
details=res_json,
active=res_json.get('active'))
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res.status_code,
errorMessage=res_json.get('detail'),
details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown(name='Update Twic User:',
t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username",
"email", "errorCode", "errorMessage", "details"],
removeNull=True)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
8,477 |
def _getImports_ldd(pth):
"""
Find the binary dependencies of PTH.
This implementation is for ldd platforms (mostly unix).
"""
rslt = set()
if is_aix:
# Match libs of the form
# 'archivelib.a(objectmember.so/.o)'
# or
# 'sharedlib.so'
# Will not match the fake lib '/unix'
lddPattern = re.compile(r"^\s*(((?P<libarchive>(.*\.a))(?P<objectmember>\(.*\)))|((?P<libshared>(.*\.so))))$")
elif is_hpux:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so => /usr/local/lib/hpux32/libpython2.7.so'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
elif is_solar:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'
# Will not match the platform specific libs starting with '/platform'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
else:
lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
ldd_output = compat.exec_command('ldd', pth)
# Fix shaky ldd output in cygwin
if is_cygwin:
while ( '???' in ldd_output ):
ldd_output = compat.exec_command('ldd', pth)
for line in ldd_output.splitlines():
m = lddPattern.search(line)
if m:
if is_aix:
libarchive = m.group('libarchive')
if libarchive:
# We matched an archive lib with a request for a particular
# embedded shared object.
# 'archivelib.a(objectmember.so/.o)'
lib = libarchive
name = os.path.basename(lib) + m.group('objectmember')
else:
# We matched a stand-alone shared library.
# 'sharedlib.so'
lib = m.group('libshared')
name = os.path.basename(lib)
elif is_hpux:
name, lib = m.group(1), m.group(2)
else:
name, lib = m.group(1), m.group(2)
if name[:10] in ('linux-gate', 'linux-vdso'):
# linux-gate is a fake library which does not exist and
# should be ignored. See also:
# http://www.trilithium.com/johan/2005/08/linux-gate/
continue
if is_cygwin and lib.lower().find('/cygdrive/c/windows/system') == 0:
# exclude Windows system library
continue
if os.path.exists(lib):
# Add lib if it is not already found.
if lib not in rslt:
rslt.add(lib)
else:
logger.error('Can not find %s in path %s (needed by %s)',
name, lib, pth)
return rslt
|
def _getImports_ldd(pth):
"""
Find the binary dependencies of PTH.
This implementation is for ldd platforms (mostly unix).
"""
rslt = set()
if is_aix:
# Match libs of the form
# 'archivelib.a(objectmember.so/.o)'
# or
# 'sharedlib.so'
# Will not match the fake lib '/unix'
lddPattern = re.compile(r"^\s*(((?P<libarchive>(.*\.a))(?P<objectmember>\(.*\)))|((?P<libshared>(.*\.so))))$")
elif is_hpux:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so => /usr/local/lib/hpux32/libpython2.7.so'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
elif is_solar:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'
# Will not match the platform specific libs starting with '/platform'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
else:
lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
ldd_output = compat.exec_command('ldd', pth)
# Fix shaky ldd output in cygwin
if is_cygwin:
while '???' in ldd_output:
ldd_output = compat.exec_command('ldd', pth)
for line in ldd_output.splitlines():
m = lddPattern.search(line)
if m:
if is_aix:
libarchive = m.group('libarchive')
if libarchive:
# We matched an archive lib with a request for a particular
# embedded shared object.
# 'archivelib.a(objectmember.so/.o)'
lib = libarchive
name = os.path.basename(lib) + m.group('objectmember')
else:
# We matched a stand-alone shared library.
# 'sharedlib.so'
lib = m.group('libshared')
name = os.path.basename(lib)
elif is_hpux:
name, lib = m.group(1), m.group(2)
else:
name, lib = m.group(1), m.group(2)
if name[:10] in ('linux-gate', 'linux-vdso'):
# linux-gate is a fake library which does not exist and
# should be ignored. See also:
# http://www.trilithium.com/johan/2005/08/linux-gate/
continue
if is_cygwin and lib.lower().find('/cygdrive/c/windows/system') == 0:
# exclude Windows system library
continue
if os.path.exists(lib):
# Add lib if it is not already found.
if lib not in rslt:
rslt.add(lib)
else:
logger.error('Can not find %s in path %s (needed by %s)',
name, lib, pth)
return rslt
|
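The generic (Linux) branch of _getImports_ldd relies on one regular expression to split a resolved ldd line into soname and path; a self-contained sketch on a hypothetical line of ldd output:
# Sketch: the generic ldd pattern applied to one hypothetical line of ldd output.
import re

lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
line = "\tlibpython3.9.so.1.0 => /usr/lib/libpython3.9.so.1.0 (0x00007f1a2c000000)"
m = lddPattern.search(line)
if m:
    name, lib = m.group(1), m.group(2)
    print(name)  # libpython3.9.so.1.0
    print(lib)   # /usr/lib/libpython3.9.so.1.0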
40,041 |
def create_expectation_suite(
context,
datasource_name=None,
generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
show_intro_message=False,
open_docs=False
):
"""
:param context:
:param datasource_name:
:param generator_name:
:param generator_asset:
:param batch_kwargs:
:param expectation_suite_name:
:param additional_batch_kwargs:
:return: a tuple: (datasource_name, generator_name, data_asset_name, batch_kwargs, profiling_results)
"""
msg_intro = """
<cyan>========== Create sample Expectations ==========</cyan>
"""
msg_some_data_assets_not_found = """Some of the data assets you specified were not found: {0:s}
"""
msg_prompt_what_will_profiler_do = """
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Press any key to continue...
"""
msg_prompt_expectation_suite_name = """
Name the new expectation sute"""
msg_data_doc_intro = """
<cyan>========== Data Docs ==========</cyan>"""
if show_intro_message:
cli_message(msg_intro)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
raise ge_exceptions.DataContextError("No datasources found in the context")
datasource_name = data_source.name
if generator_name is None or generator_asset is None or batch_kwargs is None:
datasource_name, generator_name, generator_asset, batch_kwargs = get_batch_kwargs(context,
datasource_name=datasource_name,
generator_name=generator_name,
generator_asset=generator_asset,
additional_batch_kwargs=additional_batch_kwargs)
if expectation_suite_name is None:
expectation_suite_name = click.prompt(msg_prompt_expectation_suite_name, default="warning", show_default=True)
profiler = SampleExpectationsDatasetProfiler
click.prompt(msg_prompt_what_will_profiler_do, default="Enter", hide_input=True)
cli_message("\nProfiling {0:s}...".format(generator_asset))
run_id = datetime.datetime.now().isoformat().replace(":", "") + "Z"
profiling_results = context.profile_data_asset(
datasource_name,
generator_name=generator_name,
data_asset_name=generator_asset,
batch_kwargs=batch_kwargs,
profiler=profiler,
expectation_suite_name=expectation_suite_name,
run_id=run_id,
additional_batch_kwargs=additional_batch_kwargs
)
if profiling_results['success']:
build_docs(context)
if open_docs: # This is mostly to keep tests from spawning windows
from great_expectations.data_context.types import DataAssetIdentifier, ExpectationSuiteIdentifier, \
ValidationResultIdentifier
data_asset_id = DataAssetIdentifier(datasource=datasource_name, generator=generator_name,
generator_asset=generator_asset)
expectation_suite_identifier = ExpectationSuiteIdentifier(
data_asset_name=data_asset_id,
expectation_suite_name=expectation_suite_name
)
validation_result_identifier = ValidationResultIdentifier(
expectation_suite_identifier=expectation_suite_identifier,
run_id=run_id,
)
context.open_data_docs(resource_identifier=validation_result_identifier)
return (datasource_name, generator_name, generator_asset, batch_kwargs, profiling_results)
if profiling_results['error']['code'] == DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND:
raise ge_exceptions.DataContextError(msg_some_data_assets_not_found.format(",".join(profiling_results['error']['not_found_data_assets'])))
if not profiling_results['success']: # unknown error
raise ge_exceptions.DataContextError("Unknown profiling error code: " + profiling_results['error']['code'])
|
def create_expectation_suite(
context,
datasource_name=None,
generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
show_intro_message=False,
open_docs=False
):
"""
:param context:
:param datasource_name:
:param generator_name:
:param generator_asset:
:param batch_kwargs:
:param expectation_suite_name:
:param additional_batch_kwargs:
:return: a tuple: (datasource_name, generator_name, data_asset_name, batch_kwargs, profiling_results)
"""
msg_intro = """
<cyan>========== Create sample Expectations ==========</cyan>
"""
msg_some_data_assets_not_found = """Some of the data assets you specified were not found: {0:s}
"""
msg_prompt_what_will_profiler_do = """
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Press any key to continue...
"""
msg_prompt_expectation_suite_name = """
Name the new expectation sute"""
msg_data_doc_intro = """
<cyan>========== Data Docs ==========</cyan>"""
if show_intro_message:
cli_message(msg_intro)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
raise ge_exceptions.DataContextError("No datasources found in the context")
datasource_name = data_source.name
if generator_name is None or generator_asset is None or batch_kwargs is None:
datasource_name, generator_name, generator_asset, batch_kwargs = get_batch_kwargs(context,
datasource_name=datasource_name,
generator_name=generator_name,
generator_asset=generator_asset,
additional_batch_kwargs=additional_batch_kwargs)
if expectation_suite_name is None:
expectation_suite_name = click.prompt(msg_prompt_expectation_suite_name, default="warning", show_default=True)
profiler = SampleExpectationsDatasetProfiler
click.prompt(msg_prompt_what_will_profiler_do, default="Enter", hide_input=True)
cli_message("\nProfiling {0:s}...".format(generator_asset))
run_id = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%S.%fZ")
profiling_results = context.profile_data_asset(
datasource_name,
generator_name=generator_name,
data_asset_name=generator_asset,
batch_kwargs=batch_kwargs,
profiler=profiler,
expectation_suite_name=expectation_suite_name,
run_id=run_id,
additional_batch_kwargs=additional_batch_kwargs
)
if profiling_results['success']:
build_docs(context)
if open_docs: # This is mostly to keep tests from spawning windows
from great_expectations.data_context.types import DataAssetIdentifier, ExpectationSuiteIdentifier, \
ValidationResultIdentifier
data_asset_id = DataAssetIdentifier(datasource=datasource_name, generator=generator_name,
generator_asset=generator_asset)
expectation_suite_identifier = ExpectationSuiteIdentifier(
data_asset_name=data_asset_id,
expectation_suite_name=expectation_suite_name
)
validation_result_identifier = ValidationResultIdentifier(
expectation_suite_identifier=expectation_suite_identifier,
run_id=run_id,
)
context.open_data_docs(resource_identifier=validation_result_identifier)
return (datasource_name, generator_name, generator_asset, batch_kwargs, profiling_results)
if profiling_results['error']['code'] == DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND:
raise ge_exceptions.DataContextError(msg_some_data_assets_not_found.format(",".join(profiling_results['error']['not_found_data_assets'])))
if not profiling_results['success']: # unknown error
raise ge_exceptions.DataContextError("Unknown profiling error code: " + profiling_results['error']['code'])
|
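The functional change in this pair is the run_id format; a standard-library-only sketch contrasting the two timestamp styles:
# Sketch: the two run_id formats used by the original and modified versions above.
import datetime

old_style = datetime.datetime.now().isoformat().replace(":", "") + "Z"
new_style = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%S.%fZ")
print(old_style)  # e.g. 2019-06-25T121314.123456Z (local time despite the 'Z')
print(new_style)  # e.g. 20190625T161314.123456Z (UTC, compact basic format)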
2,851 |
def test_arccos_metric():
arccos = DistanceMetric.get_metric('arccos')
def arccos_slow(x1, x2):
inprod, ss1, ss2 = 0, 0, 0
for x1_, x2_ in zip(x1, x2):
inprod += x1_ * x2_
ss1 += x1_ * x1_
ss2 += x2_ * x2_
return np.arccos(inprod / np.sqrt(ss1 * ss2)) / np.pi
a = np.random.random_sample((10, 5))
b = arccos.pairwise(a)
c = np.zeros_like(b)
for i, x1 in enumerate(a):
for j, x2 in enumerate(a):
c[i, j] = arccos_slow(x1, x2)
assert_array_almost_equal(b, c)
|
def test_arccos_metric():
arccos = DistanceMetric.get_metric('arccos')
def arccos_slow(x1, x2):
inprod, ss1, ss2 = 0, 0, 0
for x1_, x2_ in zip(x1, x2):
inprod += x1_ * x2_
ss1 += x1_ * x1_
ss2 += x2_ * x2_
return np.arccos(inprod / np.sqrt(ss1 * ss2)) / np.pi
a = np.random.random_sample((10, 5))
b = arccos.pairwise(a)
c = np.zeros_like(b)
for i, x1 in enumerate(a):
for j, x2 in enumerate(a):
c[i, j] = arccos_slow(x1, x2)
assert_allclose(b, c)
|
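The reference arccos_slow runs nested Python loops; the same angular distance matrix can be computed with vectorized numpy, as in this sketch:
# Sketch: vectorized arccos (angular) distance equivalent to arccos_slow above.
import numpy as np

a = np.random.random_sample((10, 5))
norms = np.linalg.norm(a, axis=1)
cos_sim = (a @ a.T) / np.outer(norms, norms)
dist = np.arccos(np.clip(cos_sim, -1.0, 1.0)) / np.pi  # clip guards against rounding error
print(dist.shape)  # (10, 10)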
39,182 |
def _skipIf(condition, reason, key):
if not condition:
return _pass
# In CI, default to fail, so as to prevent accidental skip.
# In other env, default to skip
var = f"TORCHAUDIO_TEST_ALLOW_SKIP_IF_{key}"
skip_allowed = _eval_env(var, default=False if _IN_CI else True)
if skip_allowed:
return unittest.skip(reason)
return _fail(f"{reason} But the test cannot be skipped. (CI={_IN_CI}, {var}={skip_allowed}.)")
|
def _skipIf(condition, reason, key):
if not condition:
return _pass
# In CI, default to fail, so as to prevent accidental skip.
# In other env, default to skip
var = f"TORCHAUDIO_TEST_ALLOW_SKIP_IF_{key}"
skip_allowed = _eval_env(var, default=not _IN_CI)
if skip_allowed:
return unittest.skip(reason)
return _fail(f"{reason} But the test cannot be skipped. (CI={_IN_CI}, {var}={skip_allowed}.)")
|
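A hedged sketch of how such a helper is typically applied as a test decorator; it relies on _skipIf and its helpers from the snippet above, and the capability flag is made up:
# Sketch: hypothetical use of the skip-or-fail helper defined above.
# _skipIf, _eval_env, _pass, _fail and _IN_CI come from the snippet above;
# _cuda_available is a made-up capability probe.
import unittest

_cuda_available = False

class CudaTests(unittest.TestCase):
    @_skipIf(not _cuda_available, "CUDA is not available.", "NO_CUDA")
    def test_gpu_kernel(self):
        self.assertTrue(True)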
42,418 |
def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> in_image.shape
(3, 373, 485)
>>> out_image.shape
(3, 265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
|
def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> in_image.shape
(3, 373, 485)
>>> cropped_raster.shape
(3, 265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
|
20,028 |
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1, c1) != (r2, c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
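The numpy core of image_fusion is concatenating the band axes and reordering them by wavelength; a small numpy-only sketch on hypothetical 2-band and 3-band cubes:
# Sketch: band concatenation and wavelength sorting, the numpy core of image_fusion.
import numpy as np

img1 = np.random.randint(0, 255, (4, 4, 2), dtype=np.uint8)  # hypothetical 2-band cube
img2 = np.random.randint(0, 255, (4, 4, 3), dtype=np.uint8)  # hypothetical 3-band cube
wvs1, wvs2 = [800.0, 650.0], [480.0, 550.0, 900.0]

array_data = np.concatenate((img1, img2), axis=2)
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
array_data = (array_data[:, :, ind] / 255).astype(np.float32)
print(wavelengths[ind])  # [480. 550. 650. 800. 900.]
print(array_data.shape)  # (4, 4, 5)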
43,618 |
def unflatten_tf(flat, model):
"""Restores an arbitrary nested structure to a flattened TF tensor.
See also :func:`_unflatten`.
Args:
flat (tf.Tensor): 1D tensor of items
model (array, Iterable, Number): model nested structure
Returns:
Union[tf.Tensor, list], array: first elements of flat arranged into the nested
structure of model, unused elements of flat
Raises:
TypeError: if ``model`` contains an object of unsupported type
"""
if isinstance(model, (numbers.Number, str)):
return flat[0], flat[1:]
if isinstance(model, (tf.Tensor, tf.Variable)):
idx = tf.size(model)
res = tf.reshape(flat[:idx], model.shape)
return res, flat[idx:]
if isinstance(model, Iterable):
res = []
for x in model:
val, flat = unflatten_tf(flat, x)
res.append(val)
return res, flat
raise TypeError("Unsupported type in the model: {}".format(type(model)))
|
def unflatten_tf(flat, model):
"""Restores an arbitrary nested structure to a flattened TF tensor.
See also :func:`~.unflatten`.
Args:
flat (tf.Tensor): 1D tensor of items
model (array, Iterable, Number): model nested structure
Returns:
Union[tf.Tensor, list], array: first elements of flat arranged into the nested
structure of model, unused elements of flat
Raises:
TypeError: if ``model`` contains an object of unsupported type
"""
if isinstance(model, (numbers.Number, str)):
return flat[0], flat[1:]
if isinstance(model, (tf.Tensor, tf.Variable)):
idx = tf.size(model)
res = tf.reshape(flat[:idx], model.shape)
return res, flat[idx:]
if isinstance(model, Iterable):
res = []
for x in model:
val, flat = unflatten_tf(flat, x)
res.append(val)
return res, flat
raise TypeError("Unsupported type in the model: {}".format(type(model)))
|
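A small hedged example of the unflatten behaviour, assuming TensorFlow is installed and unflatten_tf is in scope:

import tensorflow as tf

model = [tf.zeros((2, 2)), tf.zeros(3)]      # target nested structure (7 elements)
flat = tf.range(8, dtype=tf.float32)         # one extra element on purpose

restored, leftover = unflatten_tf(flat, model)
print(restored[0].shape, restored[1].shape)  # (2, 2) (3,)
print(leftover.numpy())                      # [7.]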
32,505 |
def http_request(method, api_endpoint, payload=None, params={}, user_auth=True, is_file=False, headers=None):
is_user_auth = True
url = BASE_URL + api_endpoint
# 2 types of auth, user and non user, mostly user is needed
if user_auth:
demisto.info("in if statement")
headers = headers or generate_user_auth_headers(api_endpoint)
else:
demisto.info("in else statement")
# This type of auth is only supported for basic commands: login/discover/refresh-token
is_user_auth = False
auth = base64.b64encode((EMAIL_ADDRESS + ':' + PASSWORD).encode("utf-8")).decode()
auth_type = 'Basic-Cloud'
auth_header = auth_type + ' ' + auth
headers = {
'x-mc-app-id': APP_ID,
'Content-Type': 'application/json',
'Authorization': auth_header
}
LOG('running %s request with url=%s\tparams=%s\tdata=%s\tis user auth=%s' % (
method, url, json.dumps(params), json.dumps(payload), is_user_auth))
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
headers=headers,
data=json.dumps(payload)
)
res.raise_for_status()
if is_file:
return res
return res.json()
except HTTPError as e:
LOG(e)
if e.response.status_code == 418: # type: ignore # pylint: disable=no-member
if not APP_ID or not EMAIL_ADDRESS or not PASSWORD:
raise Exception(
'Credentials provided are expired, could not automatically refresh tokens.'
' App ID + Email Address '
'+ Password are required.')
else:
raise
except Exception as e:
LOG(e)
raise
|
def http_request(method, api_endpoint, payload=None, params={}, user_auth=True, is_file=False, headers=None):
is_user_auth = True
url = BASE_URL + api_endpoint
# 2 types of auth, user and non user, mostly user is needed
if user_auth:
demisto.info("in if statement")
headers = headers or generate_user_auth_headers(api_endpoint)
else:
demisto.info("in else statement")
# This type of auth is only supported for basic commands: login/discover/refresh-token
is_user_auth = False
auth = base64.b64encode((EMAIL_ADDRESS + ':' + PASSWORD).encode("utf-8")).decode()
auth_type = 'Basic-Cloud'
auth_header = auth_type + ' ' + auth
headers = {
'x-mc-app-id': APP_ID,
'Content-Type': 'application/json',
'Authorization': auth_header
}
LOG('running %s request with url=%s\tparams=%s\tdata=%s\tis user auth=%s' % (
method, url, json.dumps(params), json.dumps(payload), is_user_auth))
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
headers=headers,
json=payload
)
res.raise_for_status()
if is_file:
return res
return res.json()
except HTTPError as e:
LOG(e)
if e.response.status_code == 418: # type: ignore # pylint: disable=no-member
if not APP_ID or not EMAIL_ADDRESS or not PASSWORD:
raise Exception(
'Credentials provided are expired, could not automatically refresh tokens.'
' App ID + Email Address '
'+ Password are required.')
else:
raise
except Exception as e:
LOG(e)
raise
|
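The functional change in this pair is replacing data=json.dumps(payload) with requests' json= keyword. A standalone sketch of the difference, using httpbin.org purely as a placeholder echo endpoint:

import json
import requests

payload = {"query": "example"}

# Manual serialization: the caller must also set the Content-Type header.
r1 = requests.post("https://httpbin.org/post",
                   data=json.dumps(payload),
                   headers={"Content-Type": "application/json"})

# json= serializes the body and sets Content-Type: application/json automatically.
r2 = requests.post("https://httpbin.org/post", json=payload)

print(r1.json()["json"] == r2.json()["json"])  # True: same body on the wire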
5,562 |
def test_copy():
"""Test that the copy method works for all classes in `declarative.py`."""
# Copies of objects should not point to the same location in memory
objects = [ImagePlot(), ContourPlot(), FilledContourPlot(), BarbPlot(),
MapPanel(), PanelContainer(), PlotObs()]
for obj in objects:
copied_obj = obj.copy()
assert obj != copied_obj
# Copies of plots in MapPanels should not point to same location in memory
obj = MapPanel()
obj.plots = [PlotObs(), BarbPlot(), FilledContourPlot(), ContourPlot(), ImagePlot()]
copied_obj = obj.copy()
for i in range(len(obj.plots)):
assert obj.plots[i] != copied_obj.plots[i]
|
def test_copy():
"""Test that the copy method works for all classes in `declarative.py`."""
# Copies of objects should not point to the same location in memory
objects = [ImagePlot(), ContourPlot(), FilledContourPlot(), BarbPlot(),
MapPanel(), PanelContainer(), PlotObs()]
for obj in objects:
copied_obj = obj.copy()
assert obj is not copied_obj
# Copies of plots in MapPanels should not point to same location in memory
obj = MapPanel()
obj.plots = [PlotObs(), BarbPlot(), FilledContourPlot(), ContourPlot(), ImagePlot()]
copied_obj = obj.copy()
for i in range(len(obj.plots)):
assert obj.plots[i] != copied_obj.plots[i]
|
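The change above swaps != for is not. A tiny illustration of why identity, not equality, is the right check when asserting that two objects occupy different memory:

import copy

a = [1, 2, 3]
b = copy.copy(a)

print(a == b)   # True  -> an "!=" assertion would wrongly fail for a valid copy
print(a is b)   # False -> "is not" captures "different object in memory"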
37,484 |
def assemble(
experiments: Union[
QuantumCircuit,
List[QuantumCircuit],
Schedule,
List[Schedule],
ScheduleBlock,
Union[ScheduleBlock],
],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None,
memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[float]] = None,
meas_lo_freq: Optional[List[float]] = None,
qubit_lo_range: Optional[List[float]] = None,
meas_lo_range: Optional[List[float]] = None,
schedule_los: Optional[
Union[
List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig],
]
] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: Optional[bool] = True,
use_measure_esp: Optional[bool] = None,
**run_config: Dict,
) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
            If any other option is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by
``schedule_los`` if specified. Must have length ``n_qubits.``
meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if
specified. Must have length ``n_qubits.``
qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]``
in Hz. Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.``
meas_lo_range: List of job level measurement LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length
``n_qubits.``
        schedule_los: Experiment level (i.e., circuit or schedule) LO frequency configurations for
qubit drive and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
use_measure_esp: Whether to use ESP (excited state promoted) readout for the final
measurement in each circuit. ESP readout can offer higher fidelity than standard
measurement sequences. See `here <https://arxiv.org/pdf/2008.08571.pdf>`_.
Default (set on backend): ``True`` if backend supports ESP readout, else ``False``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(
parameter_binds,
backend,
meas_level,
meas_return,
parametric_pulses,
**run_config_common_dict,
)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(
circuits=experiments, run_config=run_config
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(
circuits=bound_experiments,
qobj_id=qobj_id,
qobj_header=qobj_header,
run_config=run_config,
)
elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(
backend,
meas_level,
meas_return,
meas_map,
memory_slot_size,
rep_time,
parametric_pulses,
**run_config_common_dict,
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(
schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config
)
else:
raise QiskitError(
"bad input to assemble() function; " "must be either circuits or schedules"
)
|
def assemble(
experiments: Union[
QuantumCircuit,
List[QuantumCircuit],
Schedule,
List[Schedule],
ScheduleBlock,
Union[ScheduleBlock],
],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None,
memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[float]] = None,
meas_lo_freq: Optional[List[float]] = None,
qubit_lo_range: Optional[List[float]] = None,
meas_lo_range: Optional[List[float]] = None,
schedule_los: Optional[
Union[
List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig],
]
] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: Optional[bool] = True,
use_measure_esp: Optional[bool] = None,
**run_config: Dict,
) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
            If any other option is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by
``schedule_los`` if specified. Must have length ``n_qubits.``
meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if
specified. Must have length ``n_qubits.``
qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]``
in Hz. Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.``
meas_lo_range: List of job level measurement LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length
``n_qubits.``
        schedule_los: Experiment level (i.e., circuit or schedule) LO frequency configurations for
qubit drive and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
use_measure_esp: Whether to use excited state promoted (ESP) readout for the final
measurement in each circuit. ESP readout can offer higher fidelity than standard
measurement sequences. See `here <https://arxiv.org/pdf/2008.08571.pdf>`_.
Default (set on backend): ``True`` if backend supports ESP readout, else ``False``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(
parameter_binds,
backend,
meas_level,
meas_return,
parametric_pulses,
**run_config_common_dict,
)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(
circuits=experiments, run_config=run_config
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(
circuits=bound_experiments,
qobj_id=qobj_id,
qobj_header=qobj_header,
run_config=run_config,
)
elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(
backend,
meas_level,
meas_return,
meas_map,
memory_slot_size,
rep_time,
parametric_pulses,
**run_config_common_dict,
)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(
schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config
)
else:
raise QiskitError(
"bad input to assemble() function; " "must be either circuits or schedules"
)
|
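A hedged usage sketch for assemble with a circuit payload, assuming a Qiskit Terra version contemporary with this signature; the circuit and shot count are arbitrary:

from qiskit import QuantumCircuit, assemble

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

# With no backend given, defaults are used and a QasmQobj is returned.
qobj = assemble(qc, shots=2048)
print(type(qobj).__name__)   # QasmQobj
print(qobj.config.shots)     # 2048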
31,126 |
def main() -> None:
base_url = demisto.params().get('base_url')
access_id = demisto.params().get('access_id')
secret_key = demisto.params().get('secret_key')
verify = demisto.params().get('insecure')
proxy = demisto.params().get('proxy', False)
proxies = handle_proxy() if proxy else {}
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
access_id=access_id,
secret_key=secret_key,
verify=verify,
proxies=proxies,
)
if demisto.command() == 'test-module':
test_module(client)
elif demisto.command() == 'ip':
return_results(ip_details_command(client, demisto.args()))
elif demisto.command() == 'domain':
return_results(domain_details_command(client, demisto.args()))
elif demisto.command() == 'url':
return_results(url_details_command(client, demisto.args()))
elif demisto.command() == 'file':
return_results(file_details_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
base_url = demisto.params().get('base_url')
access_id = demisto.params().get('access_id')
secret_key = demisto.params().get('secret_key')
verify = demisto.params().get('insecure')
proxy = demisto.params().get('proxy', False)
proxies = handle_proxy()
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
access_id=access_id,
secret_key=secret_key,
verify=verify,
proxies=proxies,
)
if demisto.command() == 'test-module':
test_module(client)
elif demisto.command() == 'ip':
return_results(ip_details_command(client, demisto.args()))
elif demisto.command() == 'domain':
return_results(domain_details_command(client, demisto.args()))
elif demisto.command() == 'url':
return_results(url_details_command(client, demisto.args()))
elif demisto.command() == 'file':
return_results(file_details_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
46,021 |
def build_laplacian_pyramid(
input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False
) -> List[torch.Tensor]:
r"""Construct the Laplacian pyramid for an image.
.. image:: _static/img/build_pyramid.png
The function constructs a vector of images and builds the Laplacian pyramid
by recursively computing the difference after applying
    pyrUp to the adjacent layer in its Gaussian pyramid.
Args:
input : the tensor to be used to construct the pyramid.
max_level: 0-based index of the last (the smallest) pyramid layer.
It must be non-negative.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
align_corners: interpolation flag.
Shape:
- Input: :math:`(B, C, H, W)`
- Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]`
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not isinstance(max_level, int) or max_level < 0:
raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}")
# create gaussian pyramid
gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level)
# create empty list
laplacian_pyramid: List[torch.Tensor] = []
for i in range(max_level - 1):
img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1])
laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand
laplacian_pyramid.append(laplacian)
laplacian_pyramid.append(gaussian_pyramid[-1])
return laplacian_pyramid
|
def build_laplacian_pyramid(
input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False
) -> List[torch.Tensor]:
r"""Construct the Laplacian pyramid for an image.
.. image:: _static/img/build_pyramid.png
The function constructs a vector of images and builds the Laplacian pyramid
by recursively computing the difference after applying
    pyrUp to the adjacent layer in its Gaussian pyramid.
Args:
input : the tensor to be used to construct the pyramid.
max_level: 0-based index of the last (the smallest) pyramid layer.
It must be non-negative.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
align_corners: interpolation flag.
Shape:
- Input: :math:`(B, C, H, W)`
- Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]`
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not isinstance(max_level, int) or max_level < 0:
raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}")
# create gaussian pyramid
gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level)
# create empty list
laplacian_pyramid: List[torch.Tensor] = []
for i in range(max_level - 1):
img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1], align_corners)
laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand
laplacian_pyramid.append(laplacian)
laplacian_pyramid.append(gaussian_pyramid[-1])
return laplacian_pyramid
|
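A brief usage sketch, assuming the helpers referenced above (build_pyramid, pyrup) behave as in kornia, i.e. build_pyramid returns max_level levels that halve in resolution:

import torch

img = torch.rand(1, 3, 64, 64)                  # BxCxHxW input
pyr = build_laplacian_pyramid(img, max_level=3)

# Levels 0..max_level-2 hold the detail lost between adjacent Gaussian levels;
# the final entry is the coarsest Gaussian level itself.
for level, lap in enumerate(pyr):
    print(level, tuple(lap.shape))
# expected: (1, 3, 64, 64), (1, 3, 32, 32), (1, 3, 16, 16)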
34,413 |
def write_file_config(file_config) -> Text:
return write_temp_file(file_config, "_tmp_config_file.yml")
|
def write_file_config(file_config: Text) -> Text:
return write_temp_file(file_config, "_tmp_config_file.yml")
|
28,142 |
def get_ramp_down_order(messages: List[str]) -> List[str]:
order = []
for msg in messages:
if "CONF:FIELD:TARG" not in msg:
continue
g = re.search(r"\[(.*).*\] Writing: CONF:FIELD:TARG", msg)
if g is None:
raise RuntimeError("No match is found")
name = g.groups()[0]
order.append(name)
return order
|
def get_ramp_down_order(messages: List[str]) -> List[str]:
order = []
for msg in messages:
if "CONF:FIELD:TARG" not in msg:
continue
g = re.search(r"\[(.*).*\] Writing: CONF:FIELD:TARG", msg)
if g is None:
raise RuntimeError(f"No match found in {msg!r} when getting ramp down order")
name = g.groups()[0]
order.append(name)
return order
|
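An illustration of the parsing get_ramp_down_order performs, on hypothetical log lines (the bracketed instrument names are made up; the function is assumed to be in scope):

messages = [
    "[magnet_x] Writing: CONF:FIELD:TARG 0.0",
    "[magnet_y] Writing: CONF:FIELD:TARG 0.0",
    "[magnet_x] Writing: RAMP:RATE:FIELD 0.1",   # skipped: no CONF:FIELD:TARG
]
print(get_ramp_down_order(messages))   # ['magnet_x', 'magnet_y']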
48,971 |
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function) , we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
elif test_out.get("script", "").endswith((".bat", ".sh")):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors, bad_lines = [], []
pyXY_selectors_lint, pyXY_lines_lint = [], []
pyXY_selectors_hint, pyXY_lines_hint = [], []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
# Look out for py27, py35 selectors; we prefer py==35
pyXY_selectors_pat = re.compile(r".+#\s*\[.*?(py\d{2,3}).*\]")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
pyXY_matches = pyXY_selectors_pat.match(selector_line)
if pyXY_matches:
for pyXY in pyXY_matches.groups():
if int(pyXY[2:]) in (27, 34, 35, 36):
# py27, py35 and so on are ok up to py36 (included); only warn
pyXY_selectors_hint.append(selector_line)
pyXY_lines_hint.append(line_number)
else:
pyXY_selectors_lint.append(selector_line)
pyXY_lines_lint.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
if pyXY_selectors_hint:
hints.append(
"Old-style Python selectors (py27, py34, py35, py36) are "
"deprecated. Instead, consider using the int ``py``. For "
"example: ``# [py>=36]``. See lines {}".format(pyXY_lines_hint)
)
if pyXY_selectors_lint:
lints.append(
"Old-style Python selectors (py27, py35, etc) are only available "
"for Python 2.7, 3.4, 3.5, and 3.6. Please use the int ``py``. For "
"example: ``# [py>=37]``. See lines {}".format(pyXY_lines_lint)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if (
"license" in license.lower()
and "unlicense" not in license.lower()
and "licenseref" not in license.lower()
and "-license" not in license.lower()
):
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", None)
if not license_file and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match(r"^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: Validate noarch
noarch_value = build_section.get("noarch")
if noarch_value is not None:
valid_noarch_values = ["python", "generic"]
if noarch_value not in valid_noarch_values:
valid_noarch_str = "`, `".join(valid_noarch_values)
lints.append(
"Invalid `noarch` value `{}`. Should be one of `{}`.".format(
noarch_value, valid_noarch_str
)
)
# 18: noarch doesn't work with selectors for runtime dependencies
if noarch_value is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(noarch_value)
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(noarch_value)
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if noarch_value is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req.partition(" ")[0] == str(language)
]
filtered_run_reqs = [
req
for req in run_reqs
if req.partition(" ")[0] == str(language)
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# 24: jinja2 variable references should be {{<one space>var<one space>}}
if recipe_dir is not None and os.path.exists(meta_fname):
bad_vars = []
bad_lines = []
with io.open(meta_fname, "rt") as fh:
for i, line in enumerate(fh.readlines()):
for m in JINJA_VAR_PAT.finditer(line):
if m.group(1) is not None:
var = m.group(1)
if var != " %s " % var.strip():
bad_vars.append(m.group(1).strip())
bad_lines.append(i + 1)
if bad_vars:
hints.append(
"Jinja2 variable references are suggested to "
"take a ``{{<one space><variable name><one space>}}``"
" form. See lines %s." % (bad_lines,)
)
# 25: require a lower bound on python version
if noarch_value == "python" and not outputs_section:
for req in run_reqs:
if (req.strip().split()[0] == "python") and (req != "python"):
break
else:
lints.append(
"noarch: python recipes are required to have a lower bound "
"on the python version. Typically this means putting "
"`python >=3.6` in **both** `host` and `run` but you should check "
"upstream for the package's Python compatibility."
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
noarch_value is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(
os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),
)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = get_yaml().load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
# 4: Check for SPDX
import license_expression
license = about_section.get("license", "")
licensing = license_expression.Licensing()
parsed_exceptions = []
try:
parsed_licenses = []
parsed_licenses_with_exception = licensing.license_symbols(
license.strip(), decompose=False
)
for l in parsed_licenses_with_exception:
if isinstance(l, license_expression.LicenseWithExceptionSymbol):
parsed_licenses.append(l.license_symbol.key)
parsed_exceptions.append(l.exception_symbol.key)
else:
parsed_licenses.append(l.key)
except license_expression.ExpressionError:
parsed_licenses = [license]
licenseref_regex = re.compile(r"^LicenseRef[a-zA-Z0-9\-.]*$")
filtered_licenses = []
for license in parsed_licenses:
if not licenseref_regex.match(license):
filtered_licenses.append(license)
with open(
os.path.join(os.path.dirname(__file__), "licenses.txt"), "r"
) as f:
expected_licenses = f.readlines()
expected_licenses = set([l.strip() for l in expected_licenses])
with open(
os.path.join(os.path.dirname(__file__), "license_exceptions.txt"), "r"
) as f:
expected_exceptions = f.readlines()
expected_exceptions = set([l.strip() for l in expected_exceptions])
if set(filtered_licenses) - expected_licenses:
hints.append(
"License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
if set(parsed_exceptions) - expected_exceptions:
hints.append(
"License exception is not an SPDX exception.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
return lints, hints
|
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
    # If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
elif test_out.get("script", "").endswith((".bat", ".sh")):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors, bad_lines = [], []
pyXY_selectors_lint, pyXY_lines_lint = [], []
pyXY_selectors_hint, pyXY_lines_hint = [], []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
# Look out for py27, py35 selectors; we prefer py==35
pyXY_selectors_pat = re.compile(r".+#\s*\[.*?(py\d{2,3}).*\]")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
pyXY_matches = pyXY_selectors_pat.match(selector_line)
if pyXY_matches:
for pyXY in pyXY_matches.groups():
if int(pyXY[2:]) in (27, 34, 35, 36):
# py27, py35 and so on are ok up to py36 (included); only warn
pyXY_selectors_hint.append(selector_line)
pyXY_lines_hint.append(line_number)
else:
pyXY_selectors_lint.append(selector_line)
pyXY_lines_lint.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
if pyXY_selectors_hint:
hints.append(
"Old-style Python selectors (py27, py34, py35, py36) are "
"deprecated. Instead, consider using the int ``py``. For "
"example: ``# [py>=36]``. See lines {}".format(pyXY_lines_hint)
)
if pyXY_selectors_lint:
lints.append(
"Old-style Python selectors (py27, py35, etc) are only available "
"for Python 2.7, 3.4, 3.5, and 3.6. Please use explicit comparisons "
"with the integer ``py``, e.g. ``# [py==37]`` or ``# [py>=37]``. "
"See lines {}".format(pyXY_lines_lint)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if (
"license" in license.lower()
and "unlicense" not in license.lower()
and "licenseref" not in license.lower()
and "-license" not in license.lower()
):
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", None)
if not license_file and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match(r"^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: Validate noarch
noarch_value = build_section.get("noarch")
if noarch_value is not None:
valid_noarch_values = ["python", "generic"]
if noarch_value not in valid_noarch_values:
valid_noarch_str = "`, `".join(valid_noarch_values)
lints.append(
"Invalid `noarch` value `{}`. Should be one of `{}`.".format(
noarch_value, valid_noarch_str
)
)
# 18: noarch doesn't work with selectors for runtime dependencies
if noarch_value is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(noarch_value)
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(noarch_value)
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if noarch_value is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req.partition(" ")[0] == str(language)
]
filtered_run_reqs = [
req
for req in run_reqs
if req.partition(" ")[0] == str(language)
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# 24: jinja2 variable references should be {{<one space>var<one space>}}
if recipe_dir is not None and os.path.exists(meta_fname):
bad_vars = []
bad_lines = []
with io.open(meta_fname, "rt") as fh:
for i, line in enumerate(fh.readlines()):
for m in JINJA_VAR_PAT.finditer(line):
if m.group(1) is not None:
var = m.group(1)
if var != " %s " % var.strip():
bad_vars.append(m.group(1).strip())
bad_lines.append(i + 1)
if bad_vars:
hints.append(
"Jinja2 variable references are suggested to "
"take a ``{{<one space><variable name><one space>}}``"
" form. See lines %s." % (bad_lines,)
)
# 25: require a lower bound on python version
if noarch_value == "python" and not outputs_section:
for req in run_reqs:
if (req.strip().split()[0] == "python") and (req != "python"):
break
else:
lints.append(
"noarch: python recipes are required to have a lower bound "
"on the python version. Typically this means putting "
"`python >=3.6` in **both** `host` and `run` but you should check "
"upstream for the package's Python compatibility."
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
noarch_value is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(
os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),
)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = get_yaml().load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
            hints.extend(findings[:MAX_SHELLCHECK_LINES])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
# 4: Check for SPDX
import license_expression
license = about_section.get("license", "")
licensing = license_expression.Licensing()
parsed_exceptions = []
try:
parsed_licenses = []
parsed_licenses_with_exception = licensing.license_symbols(
license.strip(), decompose=False
)
for l in parsed_licenses_with_exception:
if isinstance(l, license_expression.LicenseWithExceptionSymbol):
parsed_licenses.append(l.license_symbol.key)
parsed_exceptions.append(l.exception_symbol.key)
else:
parsed_licenses.append(l.key)
except license_expression.ExpressionError:
parsed_licenses = [license]
licenseref_regex = re.compile(r"^LicenseRef[a-zA-Z0-9\-.]*$")
filtered_licenses = []
for license in parsed_licenses:
if not licenseref_regex.match(license):
filtered_licenses.append(license)
with open(
os.path.join(os.path.dirname(__file__), "licenses.txt"), "r"
) as f:
expected_licenses = f.readlines()
expected_licenses = set([l.strip() for l in expected_licenses])
with open(
os.path.join(os.path.dirname(__file__), "license_exceptions.txt"), "r"
) as f:
expected_exceptions = f.readlines()
expected_exceptions = set([l.strip() for l in expected_exceptions])
if set(filtered_licenses) - expected_licenses:
hints.append(
"License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
if set(parsed_exceptions) - expected_exceptions:
hints.append(
"License exception is not an SPDX exception.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
return lints, hints
|
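A minimal, standalone sketch of the Jinja2 set-definition check used in lint 20 above, reusing the same regular expression; the sample lines are made up for illustration.

import re

# Good Jinja2 variable definitions look like "{% set name = value %}".
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")

sample_lines = [
    '{% set version = "1.2.3" %}',  # single spaces throughout -> accepted
    '{%set version = "1.2.3" %}',   # missing space after "{%" -> flagged
    '{% set version="1.2.3" %}',    # missing spaces around "=" -> flagged
]

for number, line in enumerate(sample_lines, start=1):
    if not good_jinja_pat.match(line):
        print(f"line {number}: Jinja2 variable definition does not match the suggested form: {line}")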
43,217 |
def op(
name: Optional[Union[Callable[..., Any], str]] = None,
description: Optional[str] = None,
ins: Optional[Dict[str, In]] = None,
out: Optional[Union[Out, Dict[str, Out]]] = None,
config_schema: Optional[ConfigSchemaType] = None,
required_resource_keys: Optional[Set[str]] = None,
tags: Optional[Dict[str, Any]] = None,
version: Optional[str] = None,
retry_policy: Optional[RetryPolicy] = None,
input_defs: Optional[List[InputDefinition]] = None,
output_defs: Optional[List[OutputDefinition]] = None,
) -> Union[OpDefinition, _Op]:
"""
Create an op with the specified parameters from the decorated function.
Ins and outs will be inferred from the type signature of the decorated function
if not explicitly provided.
The decorated function will be used as the op's compute function. The signature of the
decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:
1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.
2. Return an :py:class:`Output`. This output will be yielded by the compute function.
3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.
Note that options 1) and 2) are incompatible with yielding other events -- if you would like
to decorate a function that yields events, it must also wrap its eventual output in an
:py:class:`Output` and yield it.
@op supports ``async def`` functions as well, including async generators when yielding multiple
events or outputs. Note that async ops will generally be run on their own unless using a custom
:py:class:`Executor` implementation that supports running them together.
Args:
name (Optional[str]): Name of op. Must be unique within any :py:class:`GraphDefinition`
using the op.
description (Optional[str]): Human-readable description of this op. If not provided, and
            the decorated function has a docstring, that docstring will be used as the description.
ins (Optional[Dict[str, In]]):
Information about the inputs to the op. Information provided here will be combined
with what can be inferred from the function signature.
out (Optional[Union[Out, Dict[str, Out]]]):
Information about the op outputs. Information provided here will be combined with
what can be inferred from the return type signature if the function does not use yield.
        config_schema (Optional[ConfigSchema]): The schema for the config. If set, Dagster will check
that config provided for the op matches this schema and fail if it does not. If not
set, Dagster will accept any config provided for the op.
required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may
            expect and require certain metadata to be attached to an op. Values that are not strings
will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.
version (Optional[str]): (Experimental) The version of the op's compute_fn. Two ops should have
the same version if and only if they deterministically produce the same outputs when
provided the same inputs.
retry_policy (Optional[RetryPolicy]): The retry policy for this op.
input_defs (Optional[List[InputDefinition]]):
(legacy) Preserved to ease migration from :py:class:`solid`. Can be used in place of ins argument.
output_defs (Optional[List[OutputDefinition]]):
(legacy) Preserved to ease migration from :py:class:`solid`. Can be used in place of out argument.
Examples:
.. code-block:: python
@op
def hello_world():
print('hello')
@op
def echo(msg: str) -> str:
return msg
@op(
ins={'msg': In(str)},
out=Out(str)
)
def echo_2(msg): # same as above
return msg
@op(
out={'word': Out(), 'num': Out()}
)
def multi_out() -> Tuple[str, int]:
return 'cool', 4
"""
# This case is for when decorator is used bare, without arguments. e.g. @op versus @op()
if callable(name):
check.invariant(input_defs is None)
check.invariant(output_defs is None)
check.invariant(description is None)
check.invariant(config_schema is None)
check.invariant(required_resource_keys is None)
check.invariant(tags is None)
check.invariant(version is None)
return _Op()(name)
return _Op(
name=name,
description=description,
input_defs=input_defs,
output_defs=output_defs,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
tags=tags,
version=version,
retry_policy=retry_policy,
ins=ins,
out=out,
)
|
def op(
name: Optional[Union[Callable[..., Any], str]] = None,
description: Optional[str] = None,
ins: Optional[Dict[str, In]] = None,
out: Optional[Union[Out, Dict[str, Out]]] = None,
config_schema: Optional[ConfigSchemaType] = None,
required_resource_keys: Optional[Set[str]] = None,
tags: Optional[Dict[str, Any]] = None,
version: Optional[str] = None,
retry_policy: Optional[RetryPolicy] = None,
input_defs: Optional[List[InputDefinition]] = None,
output_defs: Optional[List[OutputDefinition]] = None,
) -> Union["OpDefinition", _Op]:
"""
Create an op with the specified parameters from the decorated function.
Ins and outs will be inferred from the type signature of the decorated function
if not explicitly provided.
The decorated function will be used as the op's compute function. The signature of the
decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:
1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.
2. Return an :py:class:`Output`. This output will be yielded by the compute function.
3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.
Note that options 1) and 2) are incompatible with yielding other events -- if you would like
to decorate a function that yields events, it must also wrap its eventual output in an
:py:class:`Output` and yield it.
@op supports ``async def`` functions as well, including async generators when yielding multiple
events or outputs. Note that async ops will generally be run on their own unless using a custom
:py:class:`Executor` implementation that supports running them together.
Args:
name (Optional[str]): Name of op. Must be unique within any :py:class:`GraphDefinition`
using the op.
description (Optional[str]): Human-readable description of this op. If not provided, and
            the decorated function has a docstring, that docstring will be used as the description.
ins (Optional[Dict[str, In]]):
Information about the inputs to the op. Information provided here will be combined
with what can be inferred from the function signature.
out (Optional[Union[Out, Dict[str, Out]]]):
Information about the op outputs. Information provided here will be combined with
what can be inferred from the return type signature if the function does not use yield.
        config_schema (Optional[ConfigSchema]): The schema for the config. If set, Dagster will check
that config provided for the op matches this schema and fail if it does not. If not
set, Dagster will accept any config provided for the op.
required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may
            expect and require certain metadata to be attached to an op. Values that are not strings
will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.
version (Optional[str]): (Experimental) The version of the op's compute_fn. Two ops should have
the same version if and only if they deterministically produce the same outputs when
provided the same inputs.
retry_policy (Optional[RetryPolicy]): The retry policy for this op.
input_defs (Optional[List[InputDefinition]]):
(legacy) Preserved to ease migration from :py:class:`solid`. Can be used in place of ins argument.
output_defs (Optional[List[OutputDefinition]]):
(legacy) Preserved to ease migration from :py:class:`solid`. Can be used in place of out argument.
Examples:
.. code-block:: python
@op
def hello_world():
print('hello')
@op
def echo(msg: str) -> str:
return msg
@op(
ins={'msg': In(str)},
out=Out(str)
)
def echo_2(msg): # same as above
return msg
@op(
out={'word': Out(), 'num': Out()}
)
def multi_out() -> Tuple[str, int]:
return 'cool', 4
"""
# This case is for when decorator is used bare, without arguments. e.g. @op versus @op()
if callable(name):
check.invariant(input_defs is None)
check.invariant(output_defs is None)
check.invariant(description is None)
check.invariant(config_schema is None)
check.invariant(required_resource_keys is None)
check.invariant(tags is None)
check.invariant(version is None)
return _Op()(name)
return _Op(
name=name,
description=description,
input_defs=input_defs,
output_defs=output_defs,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
tags=tags,
version=version,
retry_policy=retry_policy,
ins=ins,
out=out,
)
|
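The `if callable(name)` branch above is the standard way to let one decorator work both bare (@op) and parameterized (@op(...)). A generic sketch of that dispatch, with made-up names (traced, tag) unrelated to Dagster:

import functools

def traced(name=None, *, tag="default"):
    """Decorator usable both as @traced and as @traced(tag=...)."""
    def decorate(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            print(f"[{tag}] calling {fn.__name__}")
            return fn(*args, **kwargs)
        return wrapper
    if callable(name):
        # Bare usage: @traced -- the decorated function arrives as `name`.
        return decorate(name)
    # Parameterized usage: @traced(tag=...) -- return the actual decorator.
    return decorate

@traced
def hello():
    return "hello"

@traced(tag="math")
def add(a, b):
    return a + b

print(hello(), add(2, 3))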
55,808 |
def run(dataset: Dataset, config: TaskConfig):
log.info(f"\n**** MLNet [v{config.framework_version}] ****\n")
    available_task_list = ['classification', 'regression']
    if config.type not in available_task_list:
raise ValueError(f'{config.type} is not supported.')
dir_path = os.path.dirname(os.path.realpath(__file__))
DOTNET_INSTALL_DIR = os.path.join(dir_path, 'lib')
os.environ['DOTNET_ROOT'] = DOTNET_INSTALL_DIR
os.environ['MLNetCLIEnablePredict'] = 'True'
threads_count_per_core = psutil.cpu_count() / psutil.cpu_count(logical=False)
os.environ['MLNET_MAX_THREAD'] = str(config.cores * threads_count_per_core)
mlnet = os.path.join(DOTNET_INSTALL_DIR, 'mlnet')
train_time_in_seconds = config.max_runtime_seconds
sub_command = config.type
# set up MODELBUILDER_AUTOML
MODELBUILDER_AUTOML = config.framework_params.get('automl_type', 'NNI')
os.environ['MODELBUILDER_AUTOML'] = MODELBUILDER_AUTOML
artifacts = config.framework_params.get('_save_artifacts', [])
allowedMultiClassifiers = config.framework_params.get('classifiers','LGBM;RF;FASTTREE;LBFGS;SDCA')
allowedRegressors = config.framework_params.get('regressors','LGBM;RF;FASTTREE;LBFGS;SDCA')
os.environ['AutoMLServiceMultiClassifiers'] = allowedMultiClassifiers
os.environ['AutoMLServiceRegressors'] = allowedRegressors
tmpdir = tempfile.mkdtemp()
tmp_output_folder = os.path.join(tmpdir, str(config.fold))
output_dir = output_subdir('models', config=config) if 'models' in artifacts else tmp_output_folder
log_dir = output_subdir('logs', config=config) if 'logs' in artifacts else tmp_output_folder
log_path = os.path.join(log_dir, 'log.txt')
try:
label = dataset.target.index
train_dataset_path = dataset.train.data_path('csv')
test_dataset_path = dataset.test.data_path('csv')
log.info(f'train dataset: {train_dataset_path}')
log.info(f'test dataset: {test_dataset_path}')
cmd = (f"{mlnet} {sub_command}"
f" --dataset {train_dataset_path} --test-dataset {test_dataset_path} --train-time {train_time_in_seconds}"
f" --label-col {label} --output {os.path.dirname(output_dir)} --name {config.fold}"
f" --verbosity q --log-file-path {log_path}")
with Timer() as training:
run_cmd(cmd)
train_result_json = os.path.join(output_dir, '{}.mbconfig'.format(config.fold))
if not os.path.exists(train_result_json):
raise NoResultError("MLNet failed producing any prediction.")
with open(train_result_json, 'r') as f:
json_str = f.read()
mb_config = json.loads(json_str)
model_path = os.path.join(output_dir, f"{config.fold}.zip")
output_prediction_path = os.path.join(log_dir, "prediction.txt") # keeping this in log dir as it contains useful error when prediction fails
models_count = len(mb_config['RunHistory']['Trials'])
# predict
predict_cmd = (f"{mlnet} predict --task-type {config.type}"
f" --model {model_path} --dataset {test_dataset_path} > {output_prediction_path}")
with Timer() as prediction:
run_cmd(predict_cmd)
if config.type == 'classification':
prediction_df = pd.read_csv(output_prediction_path, dtype={'PredictedLabel': 'object'})
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['PredictedLabel'].values,
truth=dataset.test.y,
probabilities=prediction_df.values[:,:-1],
probabilities_labels=list(prediction_df.columns.values[:-1]),
)
if config.type == 'regression':
prediction_df = pd.read_csv(output_prediction_path)
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['Score'].values,
truth=dataset.test.y,
)
return dict(
models_count=models_count,
training_duration=training.duration,
predict_duration=prediction.duration,
)
finally:
if 'logs' in artifacts:
logs_zip = os.path.join(log_dir, "logs.zip")
zip_path(log_dir, logs_zip)
clean_dir(log_dir, filtr=lambda p: p != logs_zip)
if 'models' in artifacts:
models_zip = os.path.join(output_dir, "models.zip")
zip_path(output_dir, models_zip)
clean_dir(output_dir, filtr=lambda p: p != models_zip)
shutil.rmtree(tmpdir, ignore_errors=True)
|
def run(dataset: Dataset, config: TaskConfig):
log.info(f"\n**** MLNet [v{config.framework_version}] ****\n")
    available_task_list = ['classification', 'regression']
    if config.type not in available_task_list:
raise ValueError(f'{config.type} is not supported.')
dir_path = os.path.dirname(os.path.realpath(__file__))
DOTNET_INSTALL_DIR = os.path.join(dir_path, 'lib')
os.environ['DOTNET_ROOT'] = DOTNET_INSTALL_DIR
os.environ['MLNetCLIEnablePredict'] = 'True'
threads_count_per_core = psutil.cpu_count() / psutil.cpu_count(logical=False)
os.environ['MLNET_MAX_THREAD'] = str(config.cores * threads_count_per_core)
mlnet = os.path.join(DOTNET_INSTALL_DIR, 'mlnet')
train_time_in_seconds = config.max_runtime_seconds
sub_command = config.type
# set up MODELBUILDER_AUTOML
MODELBUILDER_AUTOML = config.framework_params.get('automl_type', 'NNI')
os.environ['MODELBUILDER_AUTOML'] = MODELBUILDER_AUTOML
artifacts = config.framework_params.get('_save_artifacts', [])
allowedMultiClassifiers = config.framework_params.get('_classifiers')
allowedRegressors = config.framework_params.get('_regressors')
if allowedMultiClassifiers is not None:
os.environ['AutoMLServiceMultiClassifiers'] = allowedMultiClassifiers
if allowedRegressors is not None:
os.environ['AutoMLServiceRegressors'] = allowedRegressors
tmpdir = tempfile.mkdtemp()
tmp_output_folder = os.path.join(tmpdir, str(config.fold))
output_dir = output_subdir('models', config=config) if 'models' in artifacts else tmp_output_folder
log_dir = output_subdir('logs', config=config) if 'logs' in artifacts else tmp_output_folder
log_path = os.path.join(log_dir, 'log.txt')
try:
label = dataset.target.index
train_dataset_path = dataset.train.data_path('csv')
test_dataset_path = dataset.test.data_path('csv')
log.info(f'train dataset: {train_dataset_path}')
log.info(f'test dataset: {test_dataset_path}')
cmd = (f"{mlnet} {sub_command}"
f" --dataset {train_dataset_path} --test-dataset {test_dataset_path} --train-time {train_time_in_seconds}"
f" --label-col {label} --output {os.path.dirname(output_dir)} --name {config.fold}"
f" --verbosity q --log-file-path {log_path}")
with Timer() as training:
run_cmd(cmd)
train_result_json = os.path.join(output_dir, '{}.mbconfig'.format(config.fold))
if not os.path.exists(train_result_json):
raise NoResultError("MLNet failed producing any prediction.")
with open(train_result_json, 'r') as f:
json_str = f.read()
mb_config = json.loads(json_str)
model_path = os.path.join(output_dir, f"{config.fold}.zip")
output_prediction_path = os.path.join(log_dir, "prediction.txt") # keeping this in log dir as it contains useful error when prediction fails
models_count = len(mb_config['RunHistory']['Trials'])
# predict
predict_cmd = (f"{mlnet} predict --task-type {config.type}"
f" --model {model_path} --dataset {test_dataset_path} > {output_prediction_path}")
with Timer() as prediction:
run_cmd(predict_cmd)
if config.type == 'classification':
prediction_df = pd.read_csv(output_prediction_path, dtype={'PredictedLabel': 'object'})
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['PredictedLabel'].values,
truth=dataset.test.y,
probabilities=prediction_df.values[:,:-1],
probabilities_labels=list(prediction_df.columns.values[:-1]),
)
if config.type == 'regression':
prediction_df = pd.read_csv(output_prediction_path)
save_predictions(
dataset=dataset,
output_file=config.output_predictions_file,
predictions=prediction_df['Score'].values,
truth=dataset.test.y,
)
return dict(
models_count=models_count,
training_duration=training.duration,
predict_duration=prediction.duration,
)
finally:
if 'logs' in artifacts:
logs_zip = os.path.join(log_dir, "logs.zip")
zip_path(log_dir, logs_zip)
clean_dir(log_dir, filtr=lambda p: p != logs_zip)
if 'models' in artifacts:
models_zip = os.path.join(output_dir, "models.zip")
zip_path(output_dir, models_zip)
clean_dir(output_dir, filtr=lambda p: p != models_zip)
shutil.rmtree(tmpdir, ignore_errors=True)
|
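A small sketch of the thread-budget arithmetic above: psutil's logical/physical core ratio gives the hyper-threads per core, and multiplying by the allotted cores yields MLNET_MAX_THREAD. The core count of 4 is a made-up stand-in for config.cores, and this sketch rounds to an integer, whereas the code above stores the raw product.

import os
import psutil

# Hyper-threads per physical core (typically 1.0 or 2.0).
threads_per_core = psutil.cpu_count() / psutil.cpu_count(logical=False)

allotted_cores = 4  # hypothetical value standing in for config.cores
os.environ['MLNET_MAX_THREAD'] = str(int(allotted_cores * threads_per_core))
print(os.environ['MLNET_MAX_THREAD'])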
5,393 |
def install(name=None, sources=None, saltenv="base", **kwargs):
"""
Install the passed package. Can install packages from the following
sources:
    * Locally (package already exists on the minion)
* HTTP/HTTPS server
* FTP server
* Salt master
Returns a dict containing the new package names and versions:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Examples:
.. code-block:: bash
# Installing a data stream pkg that already exists on the minion
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
# Installing a data stream pkg that exists on the salt master
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example:
.. code-block:: bash
# Installing a data stream pkg that exists on a HTTP server
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
If working with solaris zones and you want to install a package only in the
global zone you can pass 'current_zone_only=True' to salt to have the
package only installed in the global zone. (Behind the scenes this is
passing '-G' to the pkgadd command.) Solaris default when installing a
package in the global zone is to install it in all zones. This overrides
that and installs the package only in the global.
CLI Example:
.. code-block:: bash
# Installing a data stream package only in the global zone:
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set::
email=
instance=quit
partial=nocheck
runlevel=nocheck
idepend=nocheck
rdepend=nocheck
space=nocheck
setuid=nocheck
conflict=nocheck
action=nocheck
basedir=default
You can override any of these options in two ways. First you can optionally
pass any of the options as a kwarg to the module/state to override the
default value or you can optionally pass the 'admin_source' option
providing your own adminfile to the minions.
Note: You can find all of the possible options to provide to the adminfile
by reading the admin man page:
.. code-block:: bash
man -s 4 admin
CLI Example:
.. code-block:: bash
# Overriding the 'instance' adminfile option when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
SLS Example:
.. code-block:: yaml
# Overriding the 'instance' adminfile option when used in a state
SMClgcc346:
pkg.installed:
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
CLI Example:
.. code-block:: bash
# Providing your own adminfile when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
# Providing your own adminfile when using states
<pkg name>:
pkg.installed:
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
"""
if salt.utils.data.is_true(kwargs.get("refresh")):
log.warning("'refresh' argument not implemented for solarispkg " "module")
# pkgs is not supported, but must be passed here for API compatibility
pkgs = kwargs.pop("pkgs", None)
try:
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
if not sources:
log.error('"sources" param required for solaris pkg_add installs')
return {}
try:
if "admin_source" in kwargs:
adminfile = __salt__["cp.cache_file"](kwargs["admin_source"], saltenv)
else:
adminfile = _write_adminfile(kwargs)
old = list_pkgs()
cmd_prefix = ["/usr/sbin/pkgadd", "-n", "-a", adminfile]
# Only makes sense in a global zone but works fine in non-globals.
if (
kwargs.get("current_zone_only") is True
or kwargs.get("current_zone_only") == "True"
):
            cmd_prefix += ["-G"]
errors = []
for pkg in pkg_params:
cmd = cmd_prefix + ["-d", pkg, "all"]
# Install the package{s}
out = __salt__["cmd.run_all"](
cmd, output_loglevel="trace", python_shell=False
)
if out["retcode"] != 0 and out["stderr"]:
errors.append(out["stderr"])
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problem encountered installing package(s)",
info={"errors": errors, "changes": ret},
)
finally:
# Remove the temp adminfile
if "admin_source" not in kwargs:
try:
os.remove(adminfile)
except (NameError, OSError):
pass
return ret
|
def install(name=None, sources=None, saltenv="base", **kwargs):
"""
Install the passed package. Can install packages from the following
sources:
    * Locally (package already exists on the minion)
* HTTP/HTTPS server
* FTP server
* Salt master
Returns a dict containing the new package names and versions:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Examples:
.. code-block:: bash
# Installing a data stream pkg that already exists on the minion
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
# Installing a data stream pkg that exists on the salt master
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example:
.. code-block:: bash
# Installing a data stream pkg that exists on a HTTP server
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
If working with solaris zones and you want to install a package only in the
global zone you can pass 'current_zone_only=True' to salt to have the
package only installed in the global zone. (Behind the scenes this is
passing '-G' to the pkgadd command.) Solaris default when installing a
package in the global zone is to install it in all zones. This overrides
that and installs the package only in the global.
CLI Example:
.. code-block:: bash
# Installing a data stream package only in the global zone:
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set::
email=
instance=quit
partial=nocheck
runlevel=nocheck
idepend=nocheck
rdepend=nocheck
space=nocheck
setuid=nocheck
conflict=nocheck
action=nocheck
basedir=default
You can override any of these options in two ways. First you can optionally
pass any of the options as a kwarg to the module/state to override the
default value or you can optionally pass the 'admin_source' option
providing your own adminfile to the minions.
Note: You can find all of the possible options to provide to the adminfile
by reading the admin man page:
.. code-block:: bash
man -s 4 admin
CLI Example:
.. code-block:: bash
# Overriding the 'instance' adminfile option when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
SLS Example:
.. code-block:: yaml
# Overriding the 'instance' adminfile option when used in a state
SMClgcc346:
pkg.installed:
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
CLI Example:
.. code-block:: bash
# Providing your own adminfile when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
# Providing your own adminfile when using states
<pkg name>:
pkg.installed:
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
"""
if salt.utils.data.is_true(kwargs.get("refresh")):
log.warning("'refresh' argument not implemented for solarispkg " "module")
# pkgs is not supported, but must be passed here for API compatibility
pkgs = kwargs.pop("pkgs", None)
try:
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
if not sources:
log.error('"sources" param required for solaris pkg_add installs')
return {}
try:
if "admin_source" in kwargs:
adminfile = __salt__["cp.cache_file"](kwargs["admin_source"], saltenv)
else:
adminfile = _write_adminfile(kwargs)
old = list_pkgs()
cmd_prefix = ["/usr/sbin/pkgadd", "-n", "-a", adminfile]
# Only makes sense in a global zone but works fine in non-globals.
if kwargs.get("current_zone_only") in (True, "True"):
            cmd_prefix += ["-G"]
errors = []
for pkg in pkg_params:
cmd = cmd_prefix + ["-d", pkg, "all"]
# Install the package{s}
out = __salt__["cmd.run_all"](
cmd, output_loglevel="trace", python_shell=False
)
if out["retcode"] != 0 and out["stderr"]:
errors.append(out["stderr"])
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problem encountered installing package(s)",
info={"errors": errors, "changes": ret},
)
finally:
# Remove the temp adminfile
if "admin_source" not in kwargs:
try:
os.remove(adminfile)
except (NameError, OSError):
pass
return ret
|
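The zone flag above has to be appended as a one-element list: += on a Python list iterates its right-hand operand, so a bare string would be split into individual characters rather than added as a single pkgadd argument. A quick self-contained demonstration of the difference:

cmd_prefix = ["/usr/sbin/pkgadd", "-n"]

broken = list(cmd_prefix)
broken += "-G "            # extends the list character by character
print(broken)              # ['/usr/sbin/pkgadd', '-n', '-', 'G', ' ']

fixed = list(cmd_prefix)
fixed += ["-G"]            # appends a single argument
print(fixed)               # ['/usr/sbin/pkgadd', '-n', '-G']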
6,213 |
def getAuthorisationServerMetadata(issuer=None):
""" Get authoraisation server metadata
:return: S_OK(dict)/S_ERROR()
"""
data = {'issuer': issuer}
result = gConfig.getSections('/DIRAC')
if result['OK']:
if 'Authorization' in result['Value']:
result = gConfig.getOptionsDictRecursively('/DIRAC/Authorization')
if result['OK']:
data.update(result['Value'])
if not result['OK']:
return result
if not data['issuer']:
data['issuer'] = getAuthAPI()
if not data['issuer']:
return S_ERROR('No issuer found in DIRAC authorization server configuration.')
# Search values with type list
for key, v in data.items():
data[key] = [e for e in v.replace(', ', ',').split(',') if e] if ',' in v else v
return S_OK(data)
|
def getAuthorisationServerMetadata(issuer=None):
""" Get authorization server metadata
:return: S_OK(dict)/S_ERROR()
"""
data = {'issuer': issuer}
result = gConfig.getSections('/DIRAC')
if result['OK']:
if 'Authorization' in result['Value']:
result = gConfig.getOptionsDictRecursively('/DIRAC/Authorization')
if result['OK']:
data.update(result['Value'])
if not result['OK']:
return result
if not data['issuer']:
data['issuer'] = getAuthAPI()
if not data['issuer']:
return S_ERROR('No issuer found in DIRAC authorization server configuration.')
# Search values with type list
for key, v in data.items():
data[key] = [e for e in v.replace(', ', ',').split(',') if e] if ',' in v else v
return S_OK(data)
|
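The final loop above turns comma-separated option strings into lists while leaving scalar values untouched. A tiny sketch of that normalization on made-up option values:

def normalize_options(data):
    """Split comma-separated values into lists; leave single values as-is."""
    for key, v in data.items():
        data[key] = [e for e in v.replace(', ', ',').split(',') if e] if ',' in v else v
    return data

print(normalize_options({
    'issuer': 'https://dirac.example/auth',               # hypothetical URL, stays a string
    'grant_types': 'authorization_code, refresh_token',   # becomes a two-element list
}))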
13,592 |
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
        Distribution used for the random projection matrix Omega (`'normal'` or `'uniform'`).
oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
        NumPy array. Projection of the input matrix onto the lower-dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
        Distribution used for the random projection matrix Omega (`'normal'` or `'uniform'`).
oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
powerIterations : int
Number of power iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
        NumPy array. Projection of the input matrix onto the lower-dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
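For comparison, a NumPy-only sketch of the same randomized QB idea on a plain 2-D array, following the usual rangefinder recipe; the function name and interface here are ours, not pyMOR's.

import numpy as np

def rand_qb_dense(A, target_rank, oversampling=5, power_iterations=0, seed=None):
    """Randomized QB factorization of a dense matrix A (m x n): A ~= Q @ B."""
    rng = np.random.default_rng(seed)
    n = A.shape[1]
    # Gaussian test matrix and a sample of the range of A.
    Omega = rng.standard_normal((n, target_rank + oversampling))
    Y = A @ Omega
    # Optional power iterations sharpen the spectrum; re-orthonormalize each pass.
    for _ in range(power_iterations):
        Q, _ = np.linalg.qr(Y)
        Z, _ = np.linalg.qr(A.T @ Q)
        Y = A @ Z
    Q, _ = np.linalg.qr(Y)
    Q = Q[:, :target_rank]
    B = Q.T @ A
    return Q, B

A = np.random.default_rng(0).standard_normal((100, 40))
Q, B = rand_qb_dense(A, target_rank=10, power_iterations=1)
print(np.linalg.norm(A - Q @ B) / np.linalg.norm(A))   # relative approximation error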
5,723 |
def compare_medians_ms(group_1, group_2, axis=None):
"""
Compares the medians from two independent groups along the given axis.
The comparison is performed using the McKean-Schrader estimate of the
standard error of the medians.
Parameters
----------
group_1 : array_like
First dataset. Has to be of size >=7.
group_2 : array_like
Second dataset. Has to be of size >=7.
axis : int, optional
Axis along which the medians are estimated. If None, the arrays are
flattened. If `axis` is not None, then `group_1` and `group_2`
should have the same shape.
Returns
-------
compare_medians_ms : {float, ndarray}
If `axis` is None, then returns a float, otherwise returns a 1-D
ndarray of floats with a length equal to the length of `group_1`
along `axis`.
Examples
--------
>>> from scipy import stats
>>> a = [1, 2, 3, 4, 5, 6, 7]
>>> b = [8, 9, 10, 11, 12, 13, 14]
Arrays must be at least of size 7.
>>> stats.mstats.compare_medians_ms(a, b, axis=None)
1.0693225866553746e-05
Since no axis is provided, the result is flattened.
The McKean–Schrader estimate of the standard error is used
in conjunction with a Welch-type test in hypothesis testing.
It is used for the special case of testing the hypothesis of equal medians,
when Yuen-Welch and Box methods are not recommended.
[1]_ Wilcox.
>>> c = [-5, 82, 11, 11, 22, 55, 90]
>>> stats.mstats.compare_medians_ms(b, c)
0.275814234966238
The function is vectorized to compute along a given axis.
Numpy Broadcasting compares the shapes of two arrays element-wise.
Two dimensions are compatible when they are equal or one of them is 1.
    [2]_ NumPy Broadcasting.
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> x = rng.random(size=(3, 7))
>>> y = rng.random(size=(3, 8))
>>> stats.mstats.compare_medians_ms(x, y, axis=1)
array([0.1718035 , 0.17301209, 0.40799719])
default_rng() is the recommended constructor for generating
random numbers from a variety of probability distributions.
References
----------
[1] Wilcox, Rand R. “Introduction to Robust Estimation and Hypothesis Testing.” Science Direct, Academic Press, an Imprint of Elsevier, 2017, www.sciencedirect.com/book/9780123869838/introduction-to-robust-estimation-and-hypothesis-testing.
[2] “General Broadcasting Rules.” Broadcasting - NumPy v1.20 Manual, 31 Jan. 2021, numpy.org/doc/stable/user/basics.broadcasting.html.
"""
(med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
(std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
mstats.stde_median(group_2, axis=axis))
W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
return 1 - norm.cdf(W)
|
def compare_medians_ms(group_1, group_2, axis=None):
"""
Compares the medians from two independent groups along the given axis.
The comparison is performed using the McKean-Schrader estimate of the
standard error of the medians.
Parameters
----------
group_1 : array_like
First dataset. Has to be of size >=7.
group_2 : array_like
Second dataset. Has to be of size >=7.
axis : int, optional
Axis along which the medians are estimated. If None, the arrays are
flattened. If `axis` is not None, then `group_1` and `group_2`
should have the same shape.
Returns
-------
compare_medians_ms : {float, ndarray}
If `axis` is None, then returns a float, otherwise returns a 1-D
ndarray of floats with a length equal to the length of `group_1`
along `axis`.
Examples
--------
>>> from scipy import stats
>>> a = [1, 2, 3, 4, 5, 6, 7]
>>> b = [8, 9, 10, 11, 12, 13, 14]
Arrays must be at least of size 7.
>>> stats.mstats.compare_medians_ms(a, b, axis=None)
1.0693225866553746e-05
Since no axis is provided, the result is flattened.
The McKean–Schrader estimate of the standard error is used
in conjunction with a Welch-type test in hypothesis testing.
It is used for the special case of testing the hypothesis of equal medians,
when Yuen-Welch and Box methods are not recommended.
[1]_
>>> c = [-5, 82, 11, 11, 22, 55, 90]
>>> stats.mstats.compare_medians_ms(b, c)
0.275814234966238
The function is vectorized to compute along a given axis.
Numpy Broadcasting compares the shapes of two arrays element-wise.
Two dimensions are compatible when they are equal or one of them is 1.
    [2]_ NumPy Broadcasting.
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> x = rng.random(size=(3, 7))
>>> y = rng.random(size=(3, 8))
>>> stats.mstats.compare_medians_ms(x, y, axis=1)
array([0.1718035 , 0.17301209, 0.40799719])
default_rng() is the recommended constructor for generating
random numbers from a variety of probability distributions.
References
----------
[1] Wilcox, Rand R. “Introduction to Robust Estimation and Hypothesis Testing.” Science Direct, Academic Press, an Imprint of Elsevier, 2017, www.sciencedirect.com/book/9780123869838/introduction-to-robust-estimation-and-hypothesis-testing.
[2] “General Broadcasting Rules.” Broadcasting - NumPy v1.20 Manual, 31 Jan. 2021, numpy.org/doc/stable/user/basics.broadcasting.html.
"""
(med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
(std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
mstats.stde_median(group_2, axis=axis))
W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
return 1 - norm.cdf(W)
|
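The statistic itself is just the absolute median difference scaled by the combined standard errors and referred to a standard normal. A small sketch with hypothetical medians and McKean-Schrader standard-error estimates plugged into the same formula:

import numpy as np
from scipy.stats import norm

# Hypothetical medians and their McKean-Schrader standard-error estimates.
med_1, med_2 = 4.0, 11.0
std_1, std_2 = 1.8, 2.1

W = np.abs(med_1 - med_2) / np.sqrt(std_1**2 + std_2**2)
p_value = 1 - norm.cdf(W)
print(W, p_value)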
49,658 |
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up relative resource paths."""
pathto = context.get('pathto')
# favicon_url
favicon = context.get('favicon_url')
if favicon and not isurl(favicon):
context['favicon_url'] = pathto('_static/' + favicon, resource=True)
# logo_url
logo = context.get('logo_url')
if logo and not isurl(logo):
context['logo_url'] = pathto('_static/' + logo, resource=True)
|
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up relative resource paths."""
pathto = context.get('pathto')
# favicon_url
favicon = context.get('favicon_url')
if favicon and not isurl(favicon):
context['favicon_url'] = pathto('_static/' + favicon, resource=True)
# logo_url
logo_url = context.get('logo_url')
if logo_url and not isurl(logo_url):
context['logo_url'] = pathto('_static/' + logo_url, resource=True)
|
2,286 |
def make_er_graph(n, m, directed=False):
    '''Generates an Erdos-Renyi (ER) graph.
    An Erdos-Renyi (n, m) graph is a simple graph with n vertices and exactly m
    edges in total.
Parameters
----------
n: int
Number of vertices
m: int
Number of edges, a value between 1 and :math:`n^2`.
directed: boolean, optional (default=False)
If False, output adjacency matrix will be symmetric. Otherwise, output adjacency
matrix will be asymmetric.
Returns
-------
A: ndarray, shape (n, n)
Sampled adjacency matrix
Notes
-----
Code annotated from microsoft/graspologic, simulations.er_nm
'''
A = np.zeros((n, n))
if directed:
idx = np.where(~np.eye(n, dtype=bool))
else:
idx = np.triu_indices(n, k=1)
# get idx in 1d coordinates by ravelling
triu = np.ravel_multi_index(idx, A.shape)
# choose M of them
triu = np.random.choice(triu, size=m, replace=False)
# unravel back
triu = np.unravel_index(triu, A.shape)
A[triu] = 1
if not directed:
A = np.triu(A)
A = A + A.T - np.diag(np.diag(A))
return A
|
def make_erdos_reyni_graph(n, m, directed=False):
    '''Generates an Erdos-Renyi (ER) graph.
    An Erdos-Renyi (n, m) graph is a simple graph with n vertices and exactly m
    edges in total.
Parameters
----------
n: int
Number of vertices
m: int
Number of edges, a value between 1 and :math:`n^2`.
directed: boolean, optional (default=False)
If False, output adjacency matrix will be symmetric. Otherwise, output adjacency
matrix will be asymmetric.
Returns
-------
A: ndarray, shape (n, n)
Sampled adjacency matrix
Notes
-----
Code annotated from microsoft/graspologic, simulations.er_nm
'''
A = np.zeros((n, n))
if directed:
idx = np.where(~np.eye(n, dtype=bool))
else:
idx = np.triu_indices(n, k=1)
# get idx in 1d coordinates by ravelling
triu = np.ravel_multi_index(idx, A.shape)
# choose M of them
triu = np.random.choice(triu, size=m, replace=False)
# unravel back
triu = np.unravel_index(triu, A.shape)
A[triu] = 1
if not directed:
A = np.triu(A)
A = A + A.T - np.diag(np.diag(A))
return A
|
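The edge sampling above works entirely in flattened index space: the candidate positions are ravelled to 1-D, m of them are drawn without replacement, and the draw is unravelled back to row/column pairs. A self-contained illustration on a small undirected case:

import numpy as np

n, m = 5, 4
A = np.zeros((n, n))

# Candidate positions: strictly upper-triangular entries (undirected case).
idx = np.triu_indices(n, k=1)
flat = np.ravel_multi_index(idx, A.shape)        # 1-D coordinates of the candidates
chosen = np.random.choice(flat, size=m, replace=False)
rows, cols = np.unravel_index(chosen, A.shape)   # back to 2-D coordinates

A[rows, cols] = 1
A = A + A.T                                      # symmetrize; the diagonal stays zero
print(A)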
1,574 |
def check_as_frame(bunch, fetch_func_partial):
pd = pytest.importorskip('pandas')
frame_bunch = fetch_func_partial(as_frame=True)
assert hasattr(frame_bunch, 'frame') is True
assert isinstance(frame_bunch.frame, pd.DataFrame)
assert isinstance(frame_bunch.data, pd.DataFrame)
assert frame_bunch.data.shape == bunch.data.shape
assert isinstance(frame_bunch.target, pd.DataFrame)
assert frame_bunch.target.shape[0] == bunch.target.shape[0]
|
def check_as_frame(bunch, fetch_func_partial):
pd = pytest.importorskip('pandas')
frame_bunch = fetch_func_partial(as_frame=True)
assert hasattr(frame_bunch, 'frame')
assert isinstance(frame_bunch.frame, pd.DataFrame)
assert isinstance(frame_bunch.data, pd.DataFrame)
assert frame_bunch.data.shape == bunch.data.shape
assert isinstance(frame_bunch.target, pd.DataFrame)
assert frame_bunch.target.shape[0] == bunch.target.shape[0]
|
47,181 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples[text_column_name])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
)
return output
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="dataset map pre-processing"):
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = lm_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = lm_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples[text_column_name])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
)
return output
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="grouping texts together"):
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = lm_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = lm_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
41,686 |
def _maybe_skip_test(item, delayed=False):
"""If necessary skip test at the fixture level, to avoid
loading the selenium_standalone fixture which takes a long time.
"""
skip_msg = None
# Testing a package. Skip the test if the package is not built.
match = re.match(
r".*/packages/(?P<name>[\w\-]+)/test_[\w\-]+\.py", str(item.parent.fspath)
)
if match:
package_name = match.group("name")
if not _package_is_built(package_name):
skip_msg = f"package '{package_name}' is not built."
# Common package import test. Skip it if the package is not built.
if str(item.fspath).endswith("test_packages_common.py") and item.name.startswith(
"test_import"
):
match = re.match(
r"test_import\[(firefox|chrome|node)-(?P<name>[\w-]+)\]", item.name
)
if match:
package_name = match.group("name")
if not _package_is_built(package_name):
# If the test is going to be skipped remove the
# selenium_standalone as it takes a long time to initialize
skip_msg = f"package '{package_name}' is not built."
else:
raise AssertionError(
f"Couldn't parse package name from {item.name}. This should not happen!"
)
# TODO: also use this hook to skip doctests we cannot run (or run them
# inside the selenium wrapper)
if skip_msg is not None:
if delayed:
item.add_marker(pytest.mark.skip(reason=skip_msg))
else:
pytest.skip(skip_msg)
|
def _maybe_skip_test(item, delayed=False):
"""If necessary skip test at the fixture level, to avoid
loading the selenium_standalone fixture which takes a long time.
"""
skip_msg = None
# Testing a package. Skip the test if the package is not built.
match = re.match(
r".*/packages/(?P<name>[\w\-]+)/test_[\w\-]+\.py", str(item.parent.fspath)
)
if match:
package_name = match.group("name")
if not _package_is_built(package_name):
skip_msg = f"package '{package_name}' is not built."
# Common package import test. Skip it if the package is not built.
if not skip_msg and str(item.fspath).endswith("test_packages_common.py") and item.name.startswith(
"test_import"
):
match = re.match(
r"test_import\[(firefox|chrome|node)-(?P<name>[\w-]+)\]", item.name
)
if match:
package_name = match.group("name")
if not _package_is_built(package_name):
# If the test is going to be skipped remove the
# selenium_standalone as it takes a long time to initialize
skip_msg = f"package '{package_name}' is not built."
else:
raise AssertionError(
f"Couldn't parse package name from {item.name}. This should not happen!"
)
# TODO: also use this hook to skip doctests we cannot run (or run them
# inside the selenium wrapper)
if skip_msg is not None:
if delayed:
item.add_marker(pytest.mark.skip(reason=skip_msg))
else:
pytest.skip(skip_msg)
|
34,227 |
def _get_output_channel(
request: Request, tracker: Optional[DialogueStateTracker]
) -> OutputChannel:
"""Returns the `OutputChannel` which should be used for the bot's responses.
Args:
request: HTTP request whose query parameters can specify which `OutputChannel`
should be used.
tracker: Tracker for the conversation. Used to get the latest input channel.
Returns:
`OutputChannel` which should be used to return the bot's responses to.
"""
requested_output_channel = request.args.get(OUTPUT_CHANNEL_QUERY_KEY)
if (
requested_output_channel == USE_LATEST_INPUT_CHANNEL_AS_OUTPUT_CHANNEL
and tracker
):
requested_output_channel = tracker.get_latest_input_channel()
# Interactive training does not set `input_channels`, hence we have to be cautious
registered_input_channels = getattr(request.app, "input_channels", None) or []
matching_channels = [
channel
for channel in registered_input_channels
if channel.name() == requested_output_channel
]
# Check if matching channels can provide a valid output channel,
# otherwise use `CollectingOutputChannel`
return reduce(
lambda output_channel_created_so_far, input_channel: (
input_channel.get_output_channel() or output_channel_created_so_far
),
matching_channels,
CollectingOutputChannel(),
)
|
def _get_output_channel(
request: Request, tracker: Optional[DialogueStateTracker]
) -> OutputChannel:
"""Returns the `OutputChannel` which should be used for the bot's responses.
Args:
request: HTTP request whose query parameters can specify which `OutputChannel`
should be used.
tracker: Tracker for the conversation. Used to get the latest input channel.
Returns:
`OutputChannel` which should be used to return the bot's responses to.
"""
requested_output_channel = request.args.get(OUTPUT_CHANNEL_QUERY_KEY)
if (
requested_output_channel == USE_LATEST_INPUT_CHANNEL_AS_OUTPUT_CHANNEL
and tracker
):
requested_output_channel = tracker.get_latest_input_channel()
# Interactive training does not set `input_channels`, hence we have to be cautious
registered_input_channels = getattr(request.app, "input_channels", [])
matching_channels = [
channel
for channel in registered_input_channels
if channel.name() == requested_output_channel
]
# Check if matching channels can provide a valid output channel,
# otherwise use `CollectingOutputChannel`
return reduce(
lambda output_channel_created_so_far, input_channel: (
input_channel.get_output_channel() or output_channel_created_so_far
),
matching_channels,
CollectingOutputChannel(),
)
|
30,365 |
def is_pr_merged_command():
args = demisto.args()
pull_number = args.get('pull_number')
# raises 404 not found error if the pr was not merged
is_pr_merged(pull_number)
demisto.results(f'Pull Request #{pull_number} was Merged')
|
def is_pr_merged_command():
args = demisto.args()
pull_number = args.get('pull_number')
# raises 404 not found error if the pr was not merged
is_pr_merged(pull_number)
demisto.results(f'Pull Request #{pull_number} was merged')
|
26,663 |
def create_branch_join(branch_operator, *args, **kwargs):
"""
Create a join task for branching logic. This join task is always executed regardless
of which branches are followed. It is only skipped if the ``branch_operator`` is skipped.
"""
def python_callable(ti, **_):
from airflow.utils.session import create_session
from airflow.exceptions import AirflowSkipException
from airflow.utils.state import State
from airflow.models import TaskInstance
with create_session() as session:
branch_ti = session.query(TaskInstance).filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.task_id == branch_operator.task_id,
TaskInstance.execution_date == ti.execution_date
).one_or_none()
if not branch_ti:
return
if branch_ti.state == State.SKIPPED:
raise AirflowSkipException(f"Skipping because parent task {branch_operator.task_id} "
"is skipped.")
return PythonOperator(trigger_rule="none_failed", python_callable=python_callable, *args, **kwargs)
|
def create_branch_join_task(branch_operator, *args, **kwargs):
"""
Create a join task for branching logic. This join task is always executed regardless
of which branches are followed. It is only skipped if the ``branch_operator`` is skipped.
"""
def python_callable(ti, **_):
from airflow.utils.session import create_session
from airflow.exceptions import AirflowSkipException
from airflow.utils.state import State
from airflow.models import TaskInstance
with create_session() as session:
branch_ti = session.query(TaskInstance).filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.task_id == branch_operator.task_id,
TaskInstance.execution_date == ti.execution_date
).one_or_none()
if not branch_ti:
return
if branch_ti.state == State.SKIPPED:
raise AirflowSkipException(f"Skipping because parent task {branch_operator.task_id} "
"is skipped.")
return PythonOperator(trigger_rule="none_failed", python_callable=python_callable, *args, **kwargs)
|
49,870 |
def pvsyst_dc_losses_eq_ohms(V_mp_ref, I_mp_ref, dc_ohmic_percent=0,
modules_per_string=1,
strings_per_inverter=1):
"""
Calculates the equivalent resistance of the wires from a percent
ohmic loss at STC, defined by the user as an input to loss_parameters.
Equivalent resistance is calculated with the function:
.. math::
Rw = (%_loss_at_stc / 100) * (Varray / Iarray)
Parameters
----------
V_mp_ref: numeric
I_mp_ref: numeric
dc_ohmic_percent: numeric, default 0
modules_per_string: numeric, default 1
strings_per_inverter: numeric, default 1
"""
vmp = modules_per_string * V_mp_ref
imp = strings_per_inverter * I_mp_ref
Rw = (dc_ohmic_percent / 100) * (vmp / imp)
return Rw
|
def pvsyst_dc_losses_eq_ohms(V_mp_ref, I_mp_ref, dc_ohmic_percent=0,
modules_per_string=1,
strings_per_inverter=1):
"""
Calculates the equivalent resistance of the wires from a percent
ohmic loss at STC.
Equivalent resistance is calculated with the function:
.. math::
Rw = (%_loss_at_stc / 100) * (Varray / Iarray)
Parameters
----------
V_mp_ref: numeric
I_mp_ref: numeric
dc_ohmic_percent: numeric, default 0
modules_per_string: numeric, default 1
strings_per_inverter: numeric, default 1
"""
vmp = modules_per_string * V_mp_ref
imp = strings_per_inverter * I_mp_ref
Rw = (dc_ohmic_percent / 100) * (vmp / imp)
return Rw
|
22,020 |
def test_sklearn_incremental_predictor_classification():
df = vaex.ml.datasets.load_iris_1e5()
df_train, df_test = df.ml.train_test_split(test_size=0.1, verbose=False)
features = df_train.column_names[:4]
target = 'class_'
incremental = IncrementalPredictor(model=SGDClassifier(learning_rate='constant', eta0=0.01),
features=features,
batch_size=10_000,
num_epochs=3,
shuffle=False,
prediction_name='pred')
incremental.fit(df=df_train, target=target, classes=[0, 1, 2])
df_train = incremental.transform(df_train)
# State transfer
state = df_train.state_get()
df_test.state_set(state)
assert df_test.column_count() == 6
assert df_test.pred.values.shape == (10050,)
pred_in_memory = incremental.predict(df_test)
np.testing.assert_array_equal(pred_in_memory, df_test.pred.values)
|
def test_sklearn_incremental_predictor_classification():
df = vaex.ml.datasets.load_iris_1e5()
df_train, df_test = df.ml.train_test_split(test_size=0.1, verbose=False)
features = df_train.column_names[:4]
target = 'class_'
incremental = IncrementalPredictor(model=SGDClassifier(learning_rate='constant', eta0=0.01),
features=features,
batch_size=10_000,
num_epochs=3,
shuffle=False,
prediction_name='pred')
incremental.fit(df=df_train, target=target, classes=[0, 1, 2])
df_train = incremental.transform(df_train)
# State transfer
state = df_train.state_get()
df_test.state_set(state)
assert df_test.column_count() == 6
assert df_test.pred.values.shape == (10050,)
assert df_test.pred.values.shape == (len(df),)
pred_in_memory = incremental.predict(df_test)
np.testing.assert_array_equal(pred_in_memory, df_test.pred.values)
|
31,016 |
def vm_get_remote_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'GET',
'vms/actions/getremoteaccessfile',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
res.content['VmID'] = VmID
md = tableToMarkdown(f'VM {VmID} remote file:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Remote",
outputs_key_field='VmID',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving {vmID} remote file - {res.content}")
|
def vm_get_remote_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'GET',
'vms/actions/getremoteaccessfile',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
res.content['VmID'] = VmID
md = tableToMarkdown(f'VM {VmID} remote file:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Remote",
outputs_key_field='VmID',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving {VmID} remote file - {res.content}")
|
47,278 |
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing functions for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if not len(predictions) == 2:
raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).")
all_start_logits, all_end_logits = predictions
if not len(predictions[0]) == len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
|
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing functions for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if len(predictions) != 2:
raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).")
all_start_logits, all_end_logits = predictions
if not len(predictions[0]) == len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
|
28,304 |
def abstract_instrument(cls: Type) -> Type:
"""
A class decorator to create an abstract instrument. Abstract
instruments are allowed to have abstract parameters, but
their subclasses do not. This works by replacing the
'add_parameter' and '__init_subclass__' methods of the class
dynamically.
Args:
cls: The class to be decorated
Returns:
The decorated class
"""
def __init_subclass__(sub_cls: Type) -> None:
__init__ = sub_cls.__init__
def __init_new__(self, *args: Any, **kwargs: Any) -> None:
"""
Subclasses of an abstract instrument should check
after initialization whether there still are
abstract parameters. If there are, we should raise
an exception.
"""
__init__(self, *args, **kwargs)
# after the usual initialization...
abstract_parameters = [
parameter.name for parameter in self.parameters.values()
if isinstance(parameter, AbstractParameter)
]
if any(abstract_parameters):
cls_name = sub_cls.__name__
raise AbstractParameterException(
f"Class '{cls_name}' has un-implemented Abstract Parameter(s): " +
", ".join([f"'{name}'" for name in abstract_parameters])
)
sub_cls.__init__ = __init_new__
original_add_parameter = cls.add_parameter
def add_parameter(
self, name: str, parameter_class: type = Parameter,
**kwargs: Any
) -> None:
existing_parameter = self.parameters.get(name, None)
if isinstance(existing_parameter, AbstractParameter):
# For abstract parameters, we define special behavior.
existing_unit = getattr(existing_parameter, "unit", None)
new_unit = kwargs.get("unit", None)
if existing_unit and existing_unit != new_unit:
raise AbstractParameterException(
f"The unit of the parameter '{name}' is '{new_unit}', "
f"which is inconsistent with the unit '{existing_unit}' "
f"specified earlier. This is usually because a driver "
f"is a subclass of a baseclass which defines a parameter "
f"of the same name but with different units"
)
param = parameter_class(name=name, instrument=self, **kwargs)
self.parameters[name] = param
else:
# If it is a parameter other than an abstract parameter, call the original
# method
original_add_parameter(
self, name, parameter_class, **kwargs
)
cls.__init_subclass__ = classmethod(__init_subclass__)
cls.add_parameter = add_parameter
return cls
|
def abstract_instrument(cls: Type[InstrumentBase]) -> Type[InstrumentBase]:
"""
A class decorator to create an abstract instrument. Abstract
instruments are allowed to have abstract parameters, but
their subclasses do not. This works by replacing the
'add_parameter' and '__init_subclass__' methods of the class
dynamically.
Args:
cls: The class to be decorated
Returns:
The decorated class
"""
def __init_subclass__(sub_cls: Type) -> None:
__init__ = sub_cls.__init__
def __init_new__(self, *args: Any, **kwargs: Any) -> None:
"""
Subclasses of an abstract instrument should check
after initialization whether there still are
abstract parameters. If there are, we should raise
an exception.
"""
__init__(self, *args, **kwargs)
# after the usual initialization...
abstract_parameters = [
parameter.name for parameter in self.parameters.values()
if isinstance(parameter, AbstractParameter)
]
if any(abstract_parameters):
cls_name = sub_cls.__name__
raise AbstractParameterException(
f"Class '{cls_name}' has un-implemented Abstract Parameter(s): " +
", ".join([f"'{name}'" for name in abstract_parameters])
)
sub_cls.__init__ = __init_new__
original_add_parameter = cls.add_parameter
def add_parameter(
self, name: str, parameter_class: type = Parameter,
**kwargs: Any
) -> None:
existing_parameter = self.parameters.get(name, None)
if isinstance(existing_parameter, AbstractParameter):
# For abstract parameters, we define special behavior.
existing_unit = getattr(existing_parameter, "unit", None)
new_unit = kwargs.get("unit", None)
if existing_unit and existing_unit != new_unit:
raise AbstractParameterException(
f"The unit of the parameter '{name}' is '{new_unit}', "
f"which is inconsistent with the unit '{existing_unit}' "
f"specified earlier. This is usually because a driver "
f"is a subclass of a baseclass which defines a parameter "
f"of the same name but with different units"
)
param = parameter_class(name=name, instrument=self, **kwargs)
self.parameters[name] = param
else:
# If it is a parameter other than an abstract parameter, call the original
# method
original_add_parameter(
self, name, parameter_class, **kwargs
)
cls.__init_subclass__ = classmethod(__init_subclass__)
cls.add_parameter = add_parameter
return cls
|
5,657 |
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : ndarray
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return np.mean(a, axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanmean(amnan, axis=axis)
|
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : ndarray
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a, dtype=np.float64)
if limits is None:
return np.mean(a, axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanmean(amnan, axis=axis)
|
57,990 |
def get_authentication_value(headers, original_authentication_header):
"""
Handel the case where the authentication header is given under a different header.
This header is represented by the 'original_authentication_header' argument. This can happen when there is an
intermediate server which changes the email and holds the original value of the header in a different header.
For more info, see issue #46364.
Args:
headers: The headers dict argument given by the user
original_authentication_header: The name of a header which holds the original value of the
Authentication-Results header.
Returns:
The suitable authenticator header.
"""
headers = [header for header in headers if isinstance(header, dict)]
header_dict = {str(header.get('name')).lower(): header.get('value') for header in headers}
if original_authentication_header and original_authentication_header in header_dict:
authentication_value = header_dict[original_authentication_header]
else:
authentication_value = header_dict.get('authentication-results')
return authentication_value
|
def get_authentication_value(headers, original_authentication_header):
"""
Handles the case where the authentication header is given under a different header.
This header is represented by the 'original_authentication_header' argument. This can happen when an intermediate server changes the email and holds the original value of the header in a different header.
For more info, see issue #46364.
Args:
headers: The headers dict argument given by the user
original_authentication_header: The name of a header which holds the original value of the
Authentication-Results header.
Returns:
The suitable authenticator header.
"""
headers = [header for header in headers if isinstance(header, dict)]
header_dict = {str(header.get('name')).lower(): header.get('value') for header in headers}
if original_authentication_header and original_authentication_header in header_dict:
authentication_value = header_dict[original_authentication_header]
else:
authentication_value = header_dict.get('authentication-results')
return authentication_value
|
6,377 |
def get_cost_centers_with_children(cost_centers):
if not isinstance(cost_centers, list):
cost_centers = [d.strip() for d in cost_centers.strip().split(',') if d]
all_cost_centers = []
for d in cost_centers:
if frappe.db.exists("Cost Center", d):
lft, rgt = frappe.db.get_value("Cost Center", d, ["lft", "rgt"])
children = frappe.get_all("Cost Center", filters={"lft": [">=", lft], "rgt": ["<=", rgt]})
all_cost_centers += [c.name for c in children]
else:
frappe.throw(_("Cost Center: {0} does not exists".format(d)))
return list(set(all_cost_centers))
|
def get_cost_centers_with_children(cost_centers):
if not isinstance(cost_centers, list):
cost_centers = [d.strip() for d in cost_centers.strip().split(',') if d]
all_cost_centers = []
for d in cost_centers:
if frappe.db.exists("Cost Center", d):
lft, rgt = frappe.db.get_value("Cost Center", d, ["lft", "rgt"])
children = frappe.get_all("Cost Center", filters={"lft": [">=", lft], "rgt": ["<=", rgt]})
all_cost_centers += [c.name for c in children]
else:
frappe.throw(_("Cost Center: {0} does not exist".format(d)))
return list(set(all_cost_centers))
|
31,808 |
def main() -> None:
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# How much time before the first fetch to retrieve incidents
first_fetch_timestamp = get_first_time_fetch(demisto.params().get('first_fetch'))
demisto.debug(f'Command being called is {demisto.command()}')
try:
# Initialize Client
client = Client(base_url=BASE_URL, verify=verify_certificate, headers={}, proxy=proxy)
# Run the requested command
if demisto.command() == 'test-module':
return_results(test_module(client, first_fetch_timestamp))
elif demisto.command() == 'fetch-incidents':
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = get_max_fetch(demisto.params().get('max_fetch'))
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_timestamp,
incident_types=demisto.params().get('incident_types', DEFAULT_INCIDENT_TYPES)
)
# Set last run and create incidents in XSOAR
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'radark-incident-get-items':
return_results(incident_get_items_command(client, demisto.args()))
elif demisto.command() == 'radark-email-enrich':
return_results(email_enrich_command(client, demisto.args()))
elif demisto.command() == 'radark-item-handle':
return_results(item_handle_command(client, demisto.args()))
elif demisto.command() == 'radark-item-purchase':
return_results(item_purchase_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
verify_certificate = not demisto.params().get('insecure', False)
proxy = params.get('proxy', False)
# How much time before the first fetch to retrieve incidents
first_fetch_timestamp = get_first_time_fetch(demisto.params().get('first_fetch'))
demisto.debug(f'Command being called is {demisto.command()}')
try:
# Initialize Client
client = Client(base_url=BASE_URL, verify=verify_certificate, headers={}, proxy=proxy)
# Run the requested command
if demisto.command() == 'test-module':
return_results(test_module(client, first_fetch_timestamp))
elif demisto.command() == 'fetch-incidents':
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = get_max_fetch(demisto.params().get('max_fetch'))
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_timestamp,
incident_types=demisto.params().get('incident_types', DEFAULT_INCIDENT_TYPES)
)
# Set last run and create incidents in XSOAR
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'radark-incident-get-items':
return_results(incident_get_items_command(client, demisto.args()))
elif demisto.command() == 'radark-email-enrich':
return_results(email_enrich_command(client, demisto.args()))
elif demisto.command() == 'radark-item-handle':
return_results(item_handle_command(client, demisto.args()))
elif demisto.command() == 'radark-item-purchase':
return_results(item_purchase_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
32,506 |
def search_message_request(args):
"""
Builds payload for the request of search message command.
Args:
args: arguments given to command.
Returns: the payload to be sent to the API.
"""
search_reason = args.get('search_reason')
from_date = arg_to_datetime(args.get('from_date')).isoformat() if args.get('from_date') else None # type: ignore
to_date = arg_to_datetime(args.get('to_date')).isoformat() if args.get('to_date') else None # type: ignore
message_id = args.get('message_id')
advanced = {
'senderIP': args.get('sender_IP'),
'to': args.get('to'),
'from': args.get('from'),
'subject': args.get('subject'),
'route': args.get('route')
}
advanced_is_none = all(value is None for value in advanced.values())
payload = {'data': [
{
'start': from_date,
'end': to_date,
'searchReason': search_reason
}
]}
if advanced_is_none and message_id is None:
raise Exception('Advanced Track And Trace Options or message ID must be given in order to execute the command.')
elif advanced_is_none:
payload.get('data')[0].update({'messageId': message_id}) # type: ignore
elif message_id is None:
payload.get('data')[0].update({'advancedTrackAndTraceOptions': advanced}) # type: ignore
else:
raise Exception('Only one of message ID and advanced options can contain a value.')
return http_request(method='POST',
api_endpoint='/api/message-finder/search',
payload=payload)
|
def search_message_request(args):
"""
Builds payload for the request of search message command.
Args:
args: arguments given to command.
Returns: the payload to be sent to the API.
"""
search_reason = args.get('search_reason')
from_date = arg_to_datetime(args.get('from_date')).isoformat() if args.get('from_date') else None # type: ignore
to_date = arg_to_datetime(args.get('to_date')).isoformat() if args.get('to_date') else None # type: ignore
message_id = args.get('message_id')
advanced = {
'senderIP': args.get('sender_ip'),
'to': args.get('to'),
'from': args.get('from'),
'subject': args.get('subject'),
'route': args.get('route')
}
advanced_is_none = all(value is None for value in advanced.values())
payload = {'data': [
{
'start': from_date,
'end': to_date,
'searchReason': search_reason
}
]}
if advanced_is_none and message_id is None:
raise Exception('Advanced Track And Trace Options or message ID must be given in order to execute the command.')
elif advanced_is_none:
payload.get('data')[0].update({'messageId': message_id}) # type: ignore
elif message_id is None:
payload.get('data')[0].update({'advancedTrackAndTraceOptions': advanced}) # type: ignore
else:
raise Exception('Only one of message ID and advanced options can contain a value.')
return http_request(method='POST',
api_endpoint='/api/message-finder/search',
payload=payload)
|
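For readability, this is the payload shape the function above builds when only `message_id` is supplied; the field names come from the code, the concrete values are hypothetical:

payload = {
    'data': [
        {
            'start': '2023-01-01T00:00:00',    # from_date, ISO formatted
            'end': '2023-01-31T00:00:00',      # to_date, ISO formatted
            'searchReason': 'investigation',
            'messageId': '<message-id@example.com>',
        }
    ]
}
print(payload)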
40,026 |
def maximum_weight_independent_set(edges: Iterable[Tuple[Variable, Variable]],
nodes: Optional[Iterable[Tuple[Variable, float]]] = None,
*,
strength: Optional[float] = None,
strength_multiplier: float = 2,
) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding a maximum-weight independent set problem.
Given a graph `G`, an independent set is a set of nodes such that the
subgraph of `G` induced by these nodes contains no edges.
A maximum-weight independent set is the independent set with the highest
total node weight.
Args:
edges: The edges of the graph as an iterable of two-tuples.
nodes: The nodes of the graph as an iterable of two-tuples where the
first element of the tuple is the node label and the second element
is the node weight. Nodes not specified are given a weight of ``1``.
strength: The strength of the quadratic biases. Must be strictly
greater than ``1`` in order to enforce the independent set
constraint. If not given, the strength is determined by the
``strength_multiplier``.
strength_multiplier: The strength of the quadratic biases is given by
the maximum node weight multiplied by ``strength_multiplier``.
Returns:
A binary quadratic model. The binary quadratic model will have
variables and interactions corresponding to ``nodes`` and ``edges``.
Examples:
>>> from dimod.generators import maximum_weight_independent_set
Get a maximum independent set binary quadratic model from a list of
edges and nodes.
>>> maximum_weight_independent_set([(0, 1)], [(0, .25), (1, .5), (2, 1)])
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -1.0}, {(1, 0): 2.0}, 0.0, 'BINARY')
Get a maximum independent set binary quadratic model from a
:class:`networkx.Graph`.
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (1, 2)])
>>> G.add_nodes_from([0, 2], weight=.25)
>>> G.add_node(1, weight=.5)
>>> maximum_weight_independent_set(G.edges, G.nodes('weight'))
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -0.25}, {(1, 0): 1.0, (2, 1): 1.0}, 0.0, 'BINARY')
"""
bqm = independent_set(edges)
objective = BinaryQuadraticModel(vartype=Vartype.BINARY)
objective.add_linear_from((v, 1) for v in bqm.variables)
if nodes is None:
max_weight = 1.
else:
for v, weight in nodes:
objective.set_linear(v, weight)
max_weight = objective.linear.max(default=1)
if strength is None:
bqm *= max_weight*strength_multiplier
bqm -= objective
else:
bqm *= strength
bqm -= objective
bqm.offset = 0 # otherwise subtracting the objective gives -0 offset
return bqm
|
def maximum_weight_independent_set(edges: Iterable[Tuple[Variable, Variable]],
nodes: Optional[Iterable[Tuple[Variable, float]]] = None,
*,
strength: Optional[float] = None,
strength_multiplier: float = 2,
) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding a maximum-weight independent set problem.
Given a graph `G`, an independent set is a set of nodes such that the
subgraph of `G` induced by these nodes contains no edges.
A maximum-weight independent set is the independent set with the highest
total node weight.
Args:
edges: The edges of the graph as an iterable of two-tuples.
nodes: The nodes of the graph as an iterable of two-tuples where the
first element of the tuple is the node label and the second element
is the node weight. Nodes not specified are given a weight of ``1``.
strength: The strength of the quadratic biases. Must be strictly
greater than ``1`` in order to enforce the independent set
constraint. If not given, the strength is determined by the
``strength_multiplier``.
strength_multiplier: The strength of the quadratic biases is given by
the maximum node weight multiplied by ``strength_multiplier``.
Returns:
A binary quadratic model. The binary quadratic model will have
variables and interactions corresponding to ``nodes`` and ``edges``.
Examples:
>>> from dimod.generators import maximum_weight_independent_set
Get a maximum independent set binary quadratic model from a list of
edges and nodes.
>>> maximum_weight_independent_set([(0, 1)], [(0, .25), (1, .5), (2, 1)])
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -1.0}, {(1, 0): 2.0}, 0.0, 'BINARY')
Get a maximum-weight independent set binary quadratic model from a
:class:`networkx.Graph`.
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (1, 2)])
>>> G.add_nodes_from([0, 2], weight=.25)
>>> G.add_node(1, weight=.5)
>>> maximum_weight_independent_set(G.edges, G.nodes('weight'))
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -0.25}, {(1, 0): 1.0, (2, 1): 1.0}, 0.0, 'BINARY')
"""
bqm = independent_set(edges)
objective = BinaryQuadraticModel(vartype=Vartype.BINARY)
objective.add_linear_from((v, 1) for v in bqm.variables)
if nodes is None:
max_weight = 1.
else:
for v, weight in nodes:
objective.set_linear(v, weight)
max_weight = objective.linear.max(default=1)
if strength is None:
bqm *= max_weight*strength_multiplier
bqm -= objective
else:
bqm *= strength
bqm -= objective
bqm.offset = 0 # otherwise subtracting the objective gives -0 offset
return bqm
|
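A usage sketch for the generator above, assuming the dimod package is installed; the weights follow the docstring example and ExactSolver is used only because the graph is tiny:

import dimod
from dimod.generators import maximum_weight_independent_set

# Path graph 0-1-2; node 2 carries the largest weight, so {0, 2} is optimal.
bqm = maximum_weight_independent_set([(0, 1), (1, 2)], [(0, 0.25), (1, 0.5), (2, 1.0)])
best = dimod.ExactSolver().sample(bqm).first.sample
print(best)  # expected: {0: 1, 1: 0, 2: 1}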
56,356 |
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
generator = fixturefunc(**kwargs)
try:
fixture_return_value = next(generator)
except StopIteration:
raise ValueError(
"Fixture {} did not yield a value".format(fixturefunc.__name__)
)
else:
finalizer = functools.partial(
_teardown_yield_fixture, fixturefunc, generator
)
request.addfinalizer(finalizer)
else:
fixture_return_value = fixturefunc(**kwargs)
return fixture_return_value
|
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
generator = fixturefunc(**kwargs)
try:
fixture_return_value = next(generator)
except StopIteration:
raise ValueError(
"Fixture {} did not yield a value".format(fixturefunc.__name__)
)
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
request.addfinalizer(finalizer)
else:
fixture_return_value = fixturefunc(**kwargs)
return fixture_return_value
|
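For context, a sketch of the user-facing pattern this helper supports: a generator fixture yields exactly one value, and the code after the yield runs as teardown through the finalizer registered above.

import pytest

@pytest.fixture
def resource():
    handle = {"open": True}   # setup
    yield handle              # the single value handed to the test
    handle["open"] = False    # teardown, invoked by the registered finalizer

def test_resource(resource):
    assert resource["open"]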
12,953 |
def test_prepare_url():
redirect_url = "https://www.example.com"
params = urlencode({"param1": "abc", "param2": "xyz"})
result = prepare_url(params, redirect_url)
assert result == "https://www.example.com?param1=abc¶m2=xyz"
|
def test_prepare_url():
redirect_url = "https://www.example.com"
params = urlencode({"param1": "abc", "param2": "xyz"})
result = prepare_url(params, redirect_url)
assert result == f"{redirect_url}?param1=abc¶m2=xyz"
|
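prepare_url itself does not appear in either column; a minimal implementation consistent with this test might look like the following (the signature is an assumption):

from urllib.parse import urlencode

def prepare_url(params: str, redirect_url: str) -> str:
    # Hypothetical reconstruction: append the already-encoded query string.
    return f"{redirect_url}?{params}"

print(prepare_url(urlencode({"param1": "abc", "param2": "xyz"}), "https://www.example.com"))
# https://www.example.com?param1=abc&param2=xyz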
52,312 |
def define_custom_scopes(scopes):
"""Define custom scopes
Scopes must start with `custom:`.
It is recommended to name custom scopes with a pattern like::
custom:$your-project:$action:$resource
e.g.::
custom:jupyter_server:read:contents
That makes them easy to parse and avoids collisions across projects.
All scopes must have at least a `definition`,
which will be displayed on the oauth authorization page,
and _may_ have a `subscopes` list of other scopes if having one scope
should imply having other, more specific scopes.
Args:
scopes: dict
A dictionary of scope definitions.
The keys are the scopes,
while the values are dictionaries with.
Custom scopes must start with `custom:`
and contain only lowercase ascii letters, numbers, hyphen, underscore, colon, and asterisk (-_:*).
The part after `custom:` must start with a letter or number.
Scopes may not end with a hyphen or colon.
Examples::
define_custom_scopes(
{
"custom:jupyter_server:read:contents": {
"description": "read-only access to files in a Jupyter server",
},
"custom:jupyter_server:read": {
"description": "read-only access to a Jupyter server",
"subscopes": [
"custom:jupyter_server:read:contents",
"custom:jupyter_server:read:kernels",
"...",
},
}
)
"""
for scope, scope_definition in scopes.items():
if scope in scope_definitions and scope_definitions[scope] != scope_definition:
raise ValueError(
f"Cannot redefine scope {scope}={scope_definition}. Already have {scope}={scope_definitions[scope]}"
)
if not _custom_scope_pattern.match(scope):
raise ValueError(
f"Invalid scope: {scope!r}. Scopes must start with 'custom:' and contain only lowercase ascii letters, numbers, hyphen, underscore, colon, and asterisk."
)
if "description" not in scope_definition:
raise ValueError(
f"scope {scope}={scope_definition} missing key 'description'"
)
if "subscopes" in scope_definition:
subscopes = scope_definition["subscopes"]
if not isinstance(subscopes, list) or not all(
isinstance(s, str) for s in subscopes
):
raise ValueError(
f"subscopes must be a list of scope strings, got {subscopes!r}"
)
for subscope in subscopes:
if subscope not in scope_definitions and subscope not in scopes:
raise ValueError(
f"subscope {subscope} in {scope}={scope_definition} not found. All scopes must be defined."
)
extra_keys = set(scope_definition.keys()).difference(
["description", "subscopes"]
)
if extra_keys:
warnings.warn(
f"Ignoring unrecognized key(s) {', '.join(extra_keys)!r} in {scope}={scope_definition}",
UserWarning,
stacklevel=2,
)
app_log.info(f"Defining custom scope {scope}")
# deferred evaluation for debug-logging
app_log.debug("Defining custom scope %s=%s", scope, scope_definition)
scope_definitions[scope] = scope_definition
|
def define_custom_scopes(scopes):
"""Define custom scopes
Scopes must start with `custom:`.
It is recommended to name custom scopes with a pattern like::
custom:$your-project:$action:$resource
e.g.::
custom:jupyter_server:read:contents
That makes them easy to parse and avoids collisions across projects.
All scopes must have at least a `description`,
which will be displayed on the oauth authorization page,
Examples::
define_custom_scopes(
{
"custom:jupyter_server:read:contents": {
"description": "read-only access to files in a Jupyter server",
},
"custom:jupyter_server:read": {
"description": "read-only access to a Jupyter server",
"subscopes": [
"custom:jupyter_server:read:contents",
"custom:jupyter_server:read:kernels",
"...",
},
}
)
"""
for scope, scope_definition in scopes.items():
if scope in scope_definitions and scope_definitions[scope] != scope_definition:
raise ValueError(
f"Cannot redefine scope {scope}={scope_definition}. Already have {scope}={scope_definitions[scope]}"
)
if not _custom_scope_pattern.match(scope):
raise ValueError(
f"Invalid scope: {scope!r}. Scopes must start with 'custom:' and contain only lowercase ascii letters, numbers, hyphen, underscore, colon, and asterisk."
)
if "description" not in scope_definition:
raise ValueError(
f"scope {scope}={scope_definition} missing key 'description'"
)
if "subscopes" in scope_definition:
subscopes = scope_definition["subscopes"]
if not isinstance(subscopes, list) or not all(
isinstance(s, str) for s in subscopes
):
raise ValueError(
f"subscopes must be a list of scope strings, got {subscopes!r}"
)
for subscope in subscopes:
if subscope not in scope_definitions and subscope not in scopes:
raise ValueError(
f"subscope {subscope} in {scope}={scope_definition} not found. All scopes must be defined."
)
extra_keys = set(scope_definition.keys()).difference(
["description", "subscopes"]
)
if extra_keys:
warnings.warn(
f"Ignoring unrecognized key(s) {', '.join(extra_keys)!r} in {scope}={scope_definition}",
UserWarning,
stacklevel=2,
)
app_log.info(f"Defining custom scope {scope}")
# deferred evaluation for debug-logging
app_log.debug("Defining custom scope %s=%s", scope, scope_definition)
scope_definitions[scope] = scope_definition
|
10,517 |
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs, firstmatch):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
try:
os.makedirs(b_destpath)
except Exception as e:
module.fail_json(msg='Error creating %s Error description: %s' % (b_destpath, to_native(e)))
b_lines = []
else:
with open(b_dest, 'rb') as f:
b_lines = f.readlines()
if module._diff:
diff['before'] = to_native(b''.join(b_lines))
if regexp is not None:
bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
# index[0] is the line num where regexp has been found
# index[1] is the line num where insertafter/insertbefore has been found
index = [-1, -1]
match = None
exact_line_match = False
b_line = to_bytes(line, errors='surrogate_or_strict')
# The module's doc says
# "If regular expressions are passed to both regexp and
# insertafter, insertafter is only honored if no match for regexp is found."
# Therefore:
# 1. regexp was found -> ignore insertafter, replace the found line
# 2. regexp was not found -> insert the line after 'insertafter' or 'insertbefore' line
# Given the above:
# 1. First check that there is no match for regexp:
if regexp is not None:
for lineno, b_cur_line in enumerate(b_lines):
match_found = bre_m.search(b_cur_line)
if match_found:
index[0] = lineno
match = match_found
if firstmatch:
break
# 2. When no match found on the previous step,
# parse for searching insertafter/insertbefore:
if not match:
for lineno, b_cur_line in enumerate(b_lines):
if b_line == b_cur_line.rstrip(b'\r\n'):
index[0] = lineno
exact_line_match = True
elif bre_ins is not None and bre_ins.search(b_cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if firstmatch:
break
if insertbefore:
# index[1] for the previous line
index[1] = lineno
if firstmatch:
break
msg = ''
changed = False
b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
# Exact line or Regexp matched a line in the file
if index[0] != -1:
if backrefs and match:
b_new_line = match.expand(b_line)
else:
# Don't do backref expansion if not asked.
b_new_line = b_line
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
# If no regexp was given and no line match is found anywhere in the file,
# insert the line appropriately if using insertbefore or insertafter
if regexp is None and match is None and not exact_line_match:
# Insert lines
if insertafter and insertafter != 'EOF':
# Ensure there is a line separator after the found string
# at the end of the file.
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
b_lines[-1] = b_lines[-1] + b_linesep
# If the line to insert after is at the end of the file
# use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif insertbefore and insertbefore != 'BOF':
# If the line to insert before is at the beginning of the file
# use the appropriate index value.
if index[1] <= 0:
if b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[0]] != b_new_line:
b_lines[index[0]] = b_new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
b_lines.insert(0, b_line + b_linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
b_lines.append(b_linesep)
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif insertafter and index[1] != -1:
# Don't insert the line if it already matches at the index.
# If the line to insert after is at the end of the file use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
# insert matched, but not the regexp
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
if module._diff:
diff['after'] = to_native(b''.join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(b_dest):
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if module.check_mode and not os.path.exists(b_dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
|
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs, firstmatch):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
try:
os.makedirs(b_destpath)
except Exception as e:
module.fail_json(msg='Error creating %s Error description: %s' % (to_text(b_destpath), to_native(e)))
b_lines = []
else:
with open(b_dest, 'rb') as f:
b_lines = f.readlines()
if module._diff:
diff['before'] = to_native(b''.join(b_lines))
if regexp is not None:
bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
# index[0] is the line num where regexp has been found
# index[1] is the line num where insertafter/insertbefore has been found
index = [-1, -1]
match = None
exact_line_match = False
b_line = to_bytes(line, errors='surrogate_or_strict')
# The module's doc says
# "If regular expressions are passed to both regexp and
# insertafter, insertafter is only honored if no match for regexp is found."
# Therefore:
# 1. regexp was found -> ignore insertafter, replace the found line
# 2. regexp was not found -> insert the line after 'insertafter' or 'insertbefore' line
# Given the above:
# 1. First check that there is no match for regexp:
if regexp is not None:
for lineno, b_cur_line in enumerate(b_lines):
match_found = bre_m.search(b_cur_line)
if match_found:
index[0] = lineno
match = match_found
if firstmatch:
break
# 2. When no match found on the previous step,
# parse for searching insertafter/insertbefore:
if not match:
for lineno, b_cur_line in enumerate(b_lines):
if b_line == b_cur_line.rstrip(b'\r\n'):
index[0] = lineno
exact_line_match = True
elif bre_ins is not None and bre_ins.search(b_cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if firstmatch:
break
if insertbefore:
# index[1] for the previous line
index[1] = lineno
if firstmatch:
break
msg = ''
changed = False
b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
# Exact line or Regexp matched a line in the file
if index[0] != -1:
if backrefs and match:
b_new_line = match.expand(b_line)
else:
# Don't do backref expansion if not asked.
b_new_line = b_line
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
# If no regexp was given and no line match is found anywhere in the file,
# insert the line appropriately if using insertbefore or insertafter
if regexp is None and match is None and not exact_line_match:
# Insert lines
if insertafter and insertafter != 'EOF':
# Ensure there is a line separator after the found string
# at the end of the file.
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
b_lines[-1] = b_lines[-1] + b_linesep
# If the line to insert after is at the end of the file
# use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif insertbefore and insertbefore != 'BOF':
# If the line to insert before is at the beginning of the file
# use the appropriate index value.
if index[1] <= 0:
if b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[0]] != b_new_line:
b_lines[index[0]] = b_new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
b_lines.insert(0, b_line + b_linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
b_lines.append(b_linesep)
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif insertafter and index[1] != -1:
# Don't insert the line if it already matches at the index.
# If the line to insert after is at the end of the file use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
# insert matched, but not the regexp
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
if module._diff:
diff['after'] = to_native(b''.join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(b_dest):
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if module.check_mode and not os.path.exists(b_dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
|
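The precedence rule spelled out in the comments (a regexp match wins; insertafter/insertbefore is consulted only when regexp does not match) can be boiled down to a small standalone sketch; choose_index is a hypothetical helper, not part of the module:

import re

def choose_index(lines, regexp=None, insertafter=None):
    # A regexp match is replaced in place; otherwise insert after the first
    # insertafter match; otherwise append at the end of the file.
    if regexp is not None:
        for i, cur in enumerate(lines):
            if re.search(regexp, cur):
                return ('replace', i)
    if insertafter is not None:
        for i, cur in enumerate(lines):
            if re.search(insertafter, cur):
                return ('insert', i + 1)
    return ('append', len(lines))

print(choose_index(['a=1', 'b=2'], regexp=r'^a=', insertafter=r'^b='))  # ('replace', 0)
print(choose_index(['a=1', 'b=2'], regexp=r'^c=', insertafter=r'^b='))  # ('insert', 2)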
5,809 |
def simpson(y, x=None, dx=1.0, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : float, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
romb : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simpson(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simpson(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simpson(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if np.all((x==x[0])):
return 0
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simpson(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simpson(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simpson(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
|
def simpson(y, x=None, dx=1.0, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : float, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
romb : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simpson(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simpson(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simpson(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if np.all((x == x[0])):
return 0
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simpson(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simpson(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simpson(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
|
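As a sanity check on the docstring's claim that Simpson's rule is exact for cubics when the sample count is odd and the spacing uniform, here is a small standalone composite-Simpson sketch (independent of SciPy):

import numpy as np

def composite_simpson(y, x):
    # Assumes an odd number of equally spaced samples (an even number of intervals).
    h = x[1] - x[0]
    return h / 3 * (y[0] + y[-1] + 4 * y[1:-1:2].sum() + 2 * y[2:-1:2].sum())

x = np.linspace(0, 9, 9)           # 9 samples -> 8 intervals
print(composite_simpson(x**3, x))  # 1640.25, matches quad(lambda x: x**3, 0, 9)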
31,343 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'picus-vector-compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
42,340 |
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form ``host_dir:/container_dir:labels``. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
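A usage sketch, assuming the ansible-runner package (2.2+) is installed and a collection with role argument specs is available; the collection name and directory are placeholders:

import ansible_runner

response, error = ansible_runner.get_role_list(
    collection='community.general',        # placeholder collection name
    private_data_dir='/tmp/runner-demo',   # placeholder working directory
)
print(list(response) if response else error)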
8,670 |
def load_settings(options):
"""Load Sopel settings using the command line's ``options``.
:param options: parsed arguments
:return: sopel configuration
:rtype: :class:`sopel.config.Config`
:raise sopel.config.NotFound: raised when configuration file is not found
:raise sopel.config.ConfigurationError: raised when configuration is
invalid
This function load Sopel settings from one of these sources:
* value of ``options.config``, if given,
* or the ``default`` configuration is loaded,
then loads the settings and returns it as a :class:`~sopel.config.Config`
object.
If the configuration file can not be found, a :exc:`sopel.config.NotFound`
error will be raised.
.. note::
To use this function effectively, the
:func:`sopel.cli.utils.add_config_arguments` function should be used to
add the proper option to the argument parser.
"""
name = options.config or 'default'
filename = find_config(config.DEFAULT_HOMEDIR, name)
if not os.path.isfile(filename):
raise config.NotFound(filename=filename)
return config.Config(filename)
|
def load_settings(options):
"""Load Sopel settings using the command line's ``options``.
:param options: parsed arguments
:return: sopel configuration
:rtype: :class:`sopel.config.Config`
:raise sopel.config.NotFound: raised when configuration file is not found
:raise sopel.config.ConfigurationError: raised when configuration is
invalid
This function loads Sopel's settings from one of these sources:
* value of ``options.config``, if given,
* or the ``default`` configuration is loaded,
then loads the settings and returns it as a :class:`~sopel.config.Config`
object.
If the configuration file can not be found, a :exc:`sopel.config.NotFound`
error will be raised.
.. note::
To use this function effectively, the
:func:`sopel.cli.utils.add_config_arguments` function should be used to
add the proper option to the argument parser.
"""
name = options.config or 'default'
filename = find_config(config.DEFAULT_HOMEDIR, name)
if not os.path.isfile(filename):
raise config.NotFound(filename=filename)
return config.Config(filename)
|
20,087 |
def _dict_matches(d, **kwargs):
# Check if `d` dictionary matches `**kwargs`, e.g. if `d` is
# {'a': 1, 'b': 2}, then _dict_matches(d, a=1) returns True, and
# _dict_matches(d, c=3) returns False. Nested matching is supported, e.g.:
# for d = {'a': {'q': 0, 'w': 1}, 'b': 'foo'}, _dict_matches(d, a__w==1)
# will return True.
for k, v in kwargs.items():
try:
head, tail = k.split('__', 1)
except ValueError:
head, tail = k, None
if tail and isinstance(d[head], dict):
if not _dict_matches(d[head], **{tail: v}):
return False
elif v != d[k]:
return False
return True
|
def _dict_matches(d, **kwargs):
# Check if `d` dictionary matches `**kwargs`, e.g. if `d` is
# {'a': 1, 'b': 2}, then _dict_matches(d, a=1) returns True, and
# _dict_matches(d, c=3) returns False. Nested matching is supported, e.g.:
# for d = {'a': {'q': 0, 'w': 1}, 'b': 'foo'}, _dict_matches(d, a__w==1)
# will return True.
for k, v in kwargs.items():
head, _, tail = k.partition('__')
if tail and isinstance(d[head], dict):
if not _dict_matches(d[head], **{tail: v}):
return False
elif v != d[k]:
return False
return True
|
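Behaviour sketch for the helper above (either version works the same), assuming it is in scope, using the nested form described in its comment:

d = {'a': {'q': 0, 'w': 1}, 'b': 'foo'}
print(_dict_matches(d, a__w=1))   # True  -- nested lookup d['a']['w'] == 1
print(_dict_matches(d, b='foo'))  # True
print(_dict_matches(d, b='bar'))  # False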
4,688 |
def save_diff_image(expected, actual, output):
'''
Parameters
----------
expected : str
File path of expected image.
actual : str
File path of actual image.
output : str
File path to save difference image to.
'''
# Drop alpha channels, similarly to compare_images.
expected_image = _png.read_png(expected)[..., :3]
actual_image = _png.read_png(actual)[..., :3]
actual_image, expected_image = crop_to_same(
actual, actual_image, expected, expected_image)
expected_image = np.array(expected_image).astype(float)
actual_image = np.array(actual_image).astype(float)
if expected_image.shape != actual_image.shape:
raise _imageComparisonFailure(
"_image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
abs_diff_image = np.abs(expected_image - actual_image)
# expand differences in luminance domain
abs_diff_image *= 255 * 10
save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
|
def save_diff_image(expected, actual, output):
'''
Parameters
----------
expected : str
File path of expected image.
actual : str
File path of actual image.
output : str
File path to save difference image to.
'''
# Drop alpha channels, similarly to compare_images.
expected_image = _png.read_png(expected)[..., :3]
actual_image = _png.read_png(actual)[..., :3]
actual_image, expected_image = crop_to_same(
actual, actual_image, expected, expected_image)
expected_image = np.array(expected_image).astype(float)
actual_image = np.array(actual_image).astype(float)
if expected_image.shape != actual_image.shape:
raise _imageComparisonFailure(
"Image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
abs_diff_image = np.abs(expected_image - actual_image)
# expand differences in luminance domain
abs_diff_image *= 255 * 10
save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
|
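The heart of the routine above is the luminance amplification of the pixel-wise difference before clipping; a small numpy-only sketch (the array values stand in for PNG data in [0, 1] and are made up for illustration):

import numpy as np

expected = np.zeros((2, 2, 3), dtype=float)        # stand-in for read_png output
actual = np.full((2, 2, 3), 0.05, dtype=float)
abs_diff = np.abs(expected - actual) * 255 * 10    # expand differences in luminance domain
diff_img = np.clip(abs_diff, 0, 255).astype(np.uint8)
print(diff_img[0, 0])                              # [127 127 127] -- a 5% difference becomes clearly visible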
22,860 |
def up(threedDvec):
x = threedDvec[0]
y = threedDvec[1]
z = threedDvec[2]
return x*w1 + y*w2 + z*w3 + w0
|
def up(threedDvec):
x, y, z = threedDvec
return x*w1 + y*w2 + z*w3 + w0
|
34,311 |
def add_no_plot_param(
parser: argparse.ArgumentParser, default: bool = False, required: bool = False,
):
parser.add_argument(
"--no-plot",
action="store_true",
default=default,
help=f"Don't render plots of confusion matrix and histogram",
required=required,
)
|
def add_no_plot_param(
parser: argparse.ArgumentParser, default: bool = False, required: bool = False,
) -> None:
parser.add_argument(
"--no-plot",
action="store_true",
default=default,
help=f"Don't render plots of confusion matrix and histogram",
required=required,
)
|
32,350 |
def test_get_analysis_iocs_command_success(requests_mock):
# Arrange
analysis_id = 'analysis_id'
_setup_access_token(requests_mock)
requests_mock.get(
f'{full_url}/analyses/{analysis_id}',
json={
'result': {
'analysis_id': analysis_id,
'sub_verdict': 'trusted',
'sha256': 'sha256',
'verdict': 'trusted',
'analysis_url': 'bla'
},
'status': 'succeeded'
}
)
requests_mock.get(
f'{full_url}/analyses/{analysis_id}/iocs',
json={
'result': {
'files': [
{
'path': 'test_file_1.csv',
'sha256': 'eeb1199f7db006e4d20086171cc312cf5bdf53682cc37997223ad0c15a27dc88',
'verdict': 'malicious',
'family': 'Turla',
'type': 'Main file',
},
{
'path': 'TMP/example_file',
'sha256': '5712d70b05e8dc39bc1b60e264a262f57b4aae42f0ce3cc6c80be6198155baba',
'verdict': 'unknown',
'family': None,
'type': 'Extracted file',
}
],
'network': [
{
'ioc': '185.555.111.133',
'source': [
'Network communication'
],
'type': 'ip'
},
{
'ioc': 'raw.exampledomain.com',
'source': [
'Network communication'
],
'type': 'domain'
},
{
'ioc': '185.199.444.133',
'source': [
'Network communication'
],
'type': 'ip'
}
]
}
}
)
args = dict(analysis_id=analysis_id)
# Act
command_results = get_analysis_iocs_command(intezer_api, args)
# Assert
outputs = command_results.outputs['Intezer.Analysis(obj.ID == val.ID)']
assert outputs['ID'] == analysis_id
assert 'IOCs' in outputs
|
def test_get_analysis_iocs_command_success(requests_mock):
# Arrange
analysis_id = 'analysis_id'
_setup_access_token(requests_mock)
requests_mock.get(
f'{full_url}/analyses/{analysis_id}',
json={
'result': {
'analysis_id': analysis_id,
'sub_verdict': 'trusted',
'sha256': 'sha256',
'verdict': 'trusted',
'analysis_url': 'bla'
},
'status': 'succeeded'
}
)
requests_mock.get(
f'{full_url}/analyses/{analysis_id}/iocs',
json={
'result': {
'files': [
{
'path': 'test_file_1.csv',
'sha256': 'eeb1199f7db006e4d20086171cc312cf5bdf53682cc37997223ad0c15a27dc88',
'verdict': 'malicious',
'family': 'Turla',
'type': 'Main file',
},
{
'path': 'TMP/example_file',
'sha256': '5712d70b05e8dc39bc1b60e264a262f57b4aae42f0ce3cc6c80be6198155baba',
'verdict': 'unknown',
'family': None,
'type': 'Extracted file',
}
],
'network': [
{
'ioc': '185.555.111.133',
'source': [
'Network communication'
],
'type': 'ip'
},
{
'ioc': 'raw.exampledomain.com',
'source': [
'Network communication'
],
'type': 'domain'
},
{
'ioc': '185.199.444.133',
'source': [
'Network communication'
],
'type': 'ip'
}
]
}
}
)
args = dict(analysis_id=analysis_id)
# Act
command_results = get_analysis_iocs_command(intezer_api, args)
# Assert
outputs = command_results.outputs['Intezer.Analysis(obj.ID == val.ID)']
assert outputs.get('ID') == analysis_id
assert 'IOCs' in outputs
|
49,003 |
def test_dag_import():
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
import data_analytics_dag_expansion as module
internal_unit_testing.assert_has_valid_dag(module)
|
def test_dag_import():
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
from . import data_analytics_dag_expansion
internal_unit_testing.assert_has_valid_dag(data_analytics_dag_expansion)
|
31,718 |
def list_to_headers_and_lines(list_data, list_separator):
lines_and_headers = [line.split(list_separator) for line in list_data.split('\n')]
headers = lines_and_headers[0]
return headers, lines_and_headers[1:]
|
def list_to_headers_and_lines(list_data, list_separator: str):
lines_and_headers = [line.split(list_separator) for line in list_data.split('\n')]
headers = lines_and_headers[0]
return headers, lines_and_headers[1:]
|
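A quick usage sketch of the helper above (the CSV-like sample string is illustrative):

def list_to_headers_and_lines(list_data, list_separator: str):
    lines_and_headers = [line.split(list_separator) for line in list_data.split('\n')]
    headers = lines_and_headers[0]
    return headers, lines_and_headers[1:]

headers, lines = list_to_headers_and_lines("name,age\nalice,30\nbob,25", ",")
print(headers)  # ['name', 'age']
print(lines)    # [['alice', '30'], ['bob', '25']]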
9,314 |
def main():
"""
:return: token
"""
# define the available arguments/parameters that a user can pass to
# the module
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=dict(
iap_port=dict(type='str', required=True),
iap_fqdn=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True),
https=(dict(type='bool', default=False))
)
)
get_token(module)
|
def main():
"""
:return: token
"""
# define the available arguments/parameters that a user can pass to
# the module
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=dict(
iap_port=dict(type='str', required=True),
iap_fqdn=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
https=(dict(type='bool', default=False))
)
)
get_token(module)
|
32,447 |
def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = params.get('max_fetch')
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
last_run = arg_to_seconds_timestamp(first_fetch)
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not params.get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = params.get('max_fetch')
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
last_run = arg_to_seconds_timestamp(first_fetch)
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
55,380 |
def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None,
python_only=False):
"""
Build an MLflow Docker image that can be used to serve a
The image is built locally and it requires Docker to run.
:param image_name: Docker image name.
:param entry_point: String containing ENTRYPOINT directive for docker image
:param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository.
If specified, the image will install MLflow from this directory.
If None, it will install MLflow from pip.
:param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path
of a dockerfile context directory and returns a string containing Dockerfile commands to
run during the image build step.
:param python_only: To build docker image for python flavor only.
"""
mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None
with TempDir() as tmp:
cwd = tmp.path()
install_mlflow = _get_mlflow_install_step(cwd, mlflow_home, python_only)
custom_setup_steps = custom_setup_steps_hook(cwd) \
if custom_setup_steps_hook else ""
if python_only:
with open(os.path.join(cwd, "Dockerfile"), "w") as f:
f.write(_DOCKERFILE_PYTHON_TEMPLATE.format(
install_mlflow=install_mlflow, custom_setup_steps=custom_setup_steps,
entrypoint=entrypoint))
else:
with open(os.path.join(cwd, "Dockerfile"), "w") as f:
f.write(_DOCKERFILE_TEMPLATE.format(
install_mlflow=install_mlflow, custom_setup_steps=custom_setup_steps,
entrypoint=entrypoint))
_logger.info("Building docker image with name %s", image_name)
os.system('find {cwd}/'.format(cwd=cwd))
proc = Popen(["docker", "build", "-t", image_name, "-f", "Dockerfile", "."],
cwd=cwd,
stdout=PIPE,
stderr=STDOUT,
universal_newlines=True)
for x in iter(proc.stdout.readline, ""):
eprint(x, end='')
|
def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None,
python_only=False):
"""
Build an MLflow Docker image that can be used to serve a model.
The image is built locally and it requires Docker to run.
:param image_name: Docker image name.
:param entrypoint: String containing ENTRYPOINT directive for docker image
:param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository.
If specified, the image will install MLflow from this directory.
If None, it will install MLflow from pip.
:param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path
of a dockerfile context directory and returns a string containing Dockerfile commands to
run during the image build step.
:param python_only: To build docker image for python flavor only.
"""
mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None
with TempDir() as tmp:
cwd = tmp.path()
install_mlflow = _get_mlflow_install_step(cwd, mlflow_home, python_only)
custom_setup_steps = custom_setup_steps_hook(cwd) \
if custom_setup_steps_hook else ""
dockerfile_template = _DOCKERFILE_PYTHON_TEMPLATE if python_only else _DOCKERFILE_TEMPLATE
with open(os.path.join(cwd, "Dockerfile"), "w") as f:
f.write(
dockerfile_template.format(
install_mlflow=install_mlflow,
custom_setup_steps=custom_setup_steps,
entrypoint=entrypoint,
)
)
_logger.info("Building docker image with name %s", image_name)
os.system('find {cwd}/'.format(cwd=cwd))
proc = Popen(["docker", "build", "-t", image_name, "-f", "Dockerfile", "."],
cwd=cwd,
stdout=PIPE,
stderr=STDOUT,
universal_newlines=True)
for x in iter(proc.stdout.readline, ""):
eprint(x, end='')
|
30,830 |
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('Vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
45,897 |
def transform_bbox(trans_mat: torch.Tensor, boxes: torch.Tensor, mode: str = "xyxy") -> torch.Tensor:
r"""Function that applies a transformation matrix to a box or batch of boxes. Boxes must
be a tensor of the shape (N, 4) or a batch of boxes (B, N, 4) and trans_mat must be a (3, 3)
transformation matrix or a batch of transformation matrices (B, 3, 3)
Args:
trans_mat: The transformation matrix to be applied
boxes: The boxes to be transformed
mode: The format in which the boxes are provided. If set to 'xyxy' the boxes are assumed to be in the format
``xmin, ymin, xmax, ymax``. If set to 'xywh' the boxes are assumed to be in the format
``xmin, ymin, width, height``
Returns:
The set of transformed points in the specified mode
"""
if not isinstance(mode, str):
raise TypeError(f"Mode must be a string. Got {type(mode)}")
if mode not in ("xyxy", "xywh"):
raise ValueError(f"Mode must be one of 'xyxy', 'xywh'. Got {mode}")
# convert boxes to format xyxy
if mode == "xywh":
boxes[..., -2] = boxes[..., 0] + boxes[..., -2] # x + w
boxes[..., -1] = boxes[..., 1] + boxes[..., -1] # y + h
transformed_boxes: torch.Tensor = \
kornia.transform_points(trans_mat.view(-1, 3, 3), boxes.view(boxes.shape[0], -1, 2))
transformed_boxes = transformed_boxes.view_as(boxes)
if mode == 'xywh':
transformed_boxes[..., 2] = torch.abs(transformed_boxes[..., 2] - transformed_boxes[..., 0])
transformed_boxes[..., 3] = torch.abs(transformed_boxes[..., 3] - transformed_boxes[..., 1])
transformed_boxes[..., 0] = torch.min(transformed_boxes[..., 2], transformed_boxes[..., 0])
transformed_boxes[..., 1] = torch.min(transformed_boxes[..., 3], transformed_boxes[..., 1])
return transformed_boxes
|
def transform_bbox(trans_mat: torch.Tensor, boxes: torch.Tensor, mode: str = "xyxy") -> torch.Tensor:
r"""Function that applies a transformation matrix to a box or batch of boxes. Boxes must
be a tensor of the shape (N, 4) or a batch of boxes (B, N, 4) and trans_mat must be a (3, 3)
transformation matrix or a batch of transformation matrices (B, 3, 3)
Args:
trans_mat: The transformation matrix to be applied
boxes: The boxes to be transformed
mode: The format in which the boxes are provided. If set to 'xyxy' the boxes are assumed to be in the format
``xmin, ymin, xmax, ymax``. If set to 'xywh' the boxes are assumed to be in the format
``xmin, ymin, width, height``
Returns:
The set of transformed points in the specified mode
"""
if not isinstance(mode, str):
raise TypeError(f"Mode must be a string. Got {type(mode)}")
if mode not in ("xyxy", "xywh"):
raise ValueError(f"Mode must be one of 'xyxy', 'xywh'. Got {mode}")
# convert boxes to format xyxy
if mode == "xywh":
boxes[..., -2] = boxes[..., 0] + boxes[..., -2] # x + w
boxes[..., -1] = boxes[..., 1] + boxes[..., -1] # y + h
transformed_boxes: torch.Tensor = \
kornia.geometry.transform_points(trans_mat.view(-1, 3, 3), boxes.view(boxes.shape[0], -1, 2))
transformed_boxes = transformed_boxes.view_as(boxes)
if mode == 'xywh':
transformed_boxes[..., 2] = torch.abs(transformed_boxes[..., 2] - transformed_boxes[..., 0])
transformed_boxes[..., 3] = torch.abs(transformed_boxes[..., 3] - transformed_boxes[..., 1])
transformed_boxes[..., 0] = torch.min(transformed_boxes[..., 2], transformed_boxes[..., 0])
transformed_boxes[..., 1] = torch.min(transformed_boxes[..., 3], transformed_boxes[..., 1])
return transformed_boxes
|
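A torch-only sketch of the same box-transformation idea, reimplementing the homogeneous point transform instead of calling kornia (the translation matrix and box coordinates are made up for illustration):

import torch

def transform_points(trans_mat, points):
    # apply (B, 3, 3) homogeneous matrices to (B, N, 2) points
    ones = torch.ones(*points.shape[:-1], 1)
    homo = torch.cat([points, ones], dim=-1)       # (B, N, 3)
    out = homo @ trans_mat.transpose(-1, -2)       # (B, N, 3)
    return out[..., :2] / out[..., 2:3]

boxes = torch.tensor([[[0., 0., 2., 2.]]])         # one xyxy box, shape (1, 1, 4)
trans = torch.eye(3).unsqueeze(0)
trans[..., 0, 2] = 5.0                             # translate x by 5
pts = boxes.view(boxes.shape[0], -1, 2)            # corners as (B, N, 2) points
print(transform_points(trans, pts).view_as(boxes)) # tensor([[[5., 0., 7., 2.]]])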
34,168 |
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between markdown and json.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data in training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data according to the specified "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between markdown and json.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data in training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data into training and test data "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
31,707 |
def main():
input_args = demisto.args()
# If user did not provide a lower threshold then split is not needed.
threshold = input_args.get('LowerSimilarityThreshold')
if not threshold:
return
try:
threshold = float(threshold)
except ValueError as e:
raise DemistoException(f'Could not use threshold: {threshold}. Error: {e}')
root_context_path = 'EmailCampaign'
above_threshold_context_path = f'{root_context_path}.{ABOVE_THE_THRESHOLD_ITEMS_CONTEXT_PATH}'
below_threshold_context_path = f'{root_context_path}.{BELOW_THRESHOLD_ITEMS_CONTEXT_PATH}'
context = demisto.get(demisto.context(), f'{root_context_path}.incidents')
# If there are no incident to split
if not context:
return
only_lower_values, only_higher_values = filter_by_threshold(context, threshold)
result = []
result.append(save_to_context(only_lower_values, below_threshold_context_path))
result.append(save_to_context(only_higher_values, above_threshold_context_path, True))
return_results(result)
|
def main():
input_args = demisto.args()
# If user did not provide a lower threshold then split is not needed.
threshold = input_args.get('LowerSimilarityThreshold')
if not threshold:
return
try:
threshold = float(threshold)
except ValueError as e:
raise DemistoException(f'Could not use threshold: {threshold}. Error: {e}')
root_context_path = 'EmailCampaign'
above_threshold_context_path = f'{root_context_path}.{ABOVE_THE_THRESHOLD_ITEMS_CONTEXT_PATH}'
below_threshold_context_path = f'{root_context_path}.{BELOW_THRESHOLD_ITEMS_CONTEXT_PATH}'
context = demisto.get(demisto.context(), f'{above_threshold_context_path}')
# If there are no incident to split
if not context:
return
only_lower_values, only_higher_values = filter_by_threshold(context, threshold)
result = []
result.append(save_to_context(only_lower_values, below_threshold_context_path))
result.append(save_to_context(only_higher_values, above_threshold_context_path, True))
return_results(result)
|
30,627 |
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> CommandResults:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether a domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
# Context standard for Domain class
domain_standard_list: List[Common.Domain] = []
domain_data_list: List[Dict[str, Any]] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
# INTEGRATION DEVELOPER TIP
# We want to convert the dates to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# INTEGRATION DEVELOPER TIP
# The context is bigger here than in other commands, as it consists of 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (Domain) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the sample Design Document
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type='domain',
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the Domain Standard Context structure using Common.Domain and
# add dbot_score to it.
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
domain_standard_list.append(domain_standard_context)
domain_data_list.append(domain_data)
# In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('Domain List', domain_data_list)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
# field.
# ``indicators`` is used to provide the context standard (Domain)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data_list,
indicators=domain_standard_list
)
|
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> CommandResults:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether a domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
# Context standard for Domain class
domain_standard_list: List[Common.Domain] = []
domain_data_list: List[Dict[str, Any]] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
# INTEGRATION DEVELOPER TIP
# We want to convert the dates to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# INTEGRATION DEVELOPER TIP
# The context is bigger here than in other commands, as it consists of 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (Domain) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the sample Design Document
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the Domain Standard Context structure using Common.Domain and
# add dbot_score to it.
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
domain_standard_list.append(domain_standard_context)
domain_data_list.append(domain_data)
# In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('Domain List', domain_data_list)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
# field.
# ``indicators`` is used to provide the context standard (Domain)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data_list,
indicators=domain_standard_list
)
|
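The threshold-to-verdict mapping is the piece of logic above most worth exercising in isolation; a standalone sketch that uses plain strings in place of the Common.DBotScore constants (the threshold values are illustrative):

def reputation_to_verdict(reputation: int, threshold: int) -> str:
    # mirrors the scoring branches above
    if reputation == 0:
        return 'unknown'
    if reputation >= threshold:
        return 'bad'
    if reputation >= threshold / 2:
        return 'suspicious'
    return 'good'

assert reputation_to_verdict(0, 60) == 'unknown'
assert reputation_to_verdict(75, 60) == 'bad'
assert reputation_to_verdict(40, 60) == 'suspicious'
assert reputation_to_verdict(10, 60) == 'good'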
49,519 |
def multidimensional_deconfliction(association_set):
"""Solves the Multidimensional Assignment Problem (MAP)
The assignment problem becomes more complex when time is added as a dimension.
This basic solution finds all the conflicts in an association set and then creates a
matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a
simple 2D assignment problem. Therefore, each object will only ever be assigned to one other
at any one time. In the case of an association that only partially overlaps, the time range
of the "weaker" one (the one eliminated by assign2D) will be trimmed
until there is no conflict.
Due to the possibility of more than two conflicting associations at the same time,
this algorithm is recursive, but it is not expected many (if any) recursions will be required
for most uses.
Parameters
----------
association_set: The :class:`AssociationSet` to de-conflict
Returns
-------
: :class:`AssociationSet`
The association set without contradictory associations
"""
# Check if there are any conflicts
no_conflicts = True
for assoc1 in association_set:
for assoc2 in association_set:
if conflicts(assoc1, assoc2):
no_conflicts = False
if no_conflicts:
return association_set
objects = list(association_set.object_set)
length = len(objects)
totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total
for association in association_set.associations:
if len(association.objects) != 2:
raise ValueError("Supplied set must only contain pairs of associated objects")
obj_indices = [objects.index(list(association.objects)[0]),
objects.index(list(association.objects)[1])]
totals[obj_indices[0], obj_indices[1]] = association.time_range.duration.total_seconds()
make_symmetric(totals)
totals = numpy.rint(totals).astype(int)
numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself
solved_2d = assign2D(totals, maximize=True)[1]
winning_indices = [] # Pairs that are chosen by assign2D
for i in range(length):
if i != solved_2d[i]:
winning_indices.append([i, solved_2d[i]])
cleaned_set = AssociationSet()
if len(winning_indices) == 0:
raise ValueError("Problem unsolvable using this method")
for winner in winning_indices:
assoc = association_set.associations_including_objects({objects[winner[0]],
objects[winner[1]]})
cleaned_set.add(assoc)
association_set.remove(assoc)
# Recursive step
runners_up = set()
for assoc1 in association_set.associations:
for assoc2 in association_set.associations:
if conflicts(assoc1, assoc2):
runners_up = multidimensional_deconfliction(association_set).associations
# At this point, none of association_set should conflict with one another
for runner_up in runners_up:
for winner in cleaned_set:
if conflicts(runner_up, winner):
runner_up.time_range.minus(winner.time_range)
if runner_up.time_range is not None:
cleaned_set.add(runner_up)
else:
runners_up.remove(runner_up)
return cleaned_set
|
def multidimensional_deconfliction(association_set):
"""Solves the Multidimensional Assignment Problem (MAP)
The assignment problem becomes more complex when time is added as a dimension.
This basic solution finds all the conflicts in an association set and then creates a
matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a
simple 2D assignment problem. Therefore, each object will only ever be assigned to one other
at any one time. In the case of an association that only partially overlaps, the time range
of the "weaker" one (the one eliminated by assign2D) will be trimmed
until there is no conflict.
Due to the possibility of more than two conflicting associations at the same time,
this algorithm is recursive, but it is not expected many (if any) recursions will be required
for most uses.
Parameters
----------
association_set: The :class:`AssociationSet` to de-conflict
Returns
-------
: :class:`AssociationSet`
The association set without contradictory associations
"""
# Check if there are any conflicts
for assoc1 in association_set:
for assoc2 in association_set:
if conflicts(assoc1, assoc2):
break
else:
continue
break
else:
return association_set
objects = list(association_set.object_set)
length = len(objects)
totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total
for association in association_set.associations:
if len(association.objects) != 2:
raise ValueError("Supplied set must only contain pairs of associated objects")
obj_indices = [objects.index(list(association.objects)[0]),
objects.index(list(association.objects)[1])]
totals[obj_indices[0], obj_indices[1]] = association.time_range.duration.total_seconds()
make_symmetric(totals)
totals = numpy.rint(totals).astype(int)
numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself
solved_2d = assign2D(totals, maximize=True)[1]
winning_indices = [] # Pairs that are chosen by assign2D
for i in range(length):
if i != solved_2d[i]:
winning_indices.append([i, solved_2d[i]])
cleaned_set = AssociationSet()
if len(winning_indices) == 0:
raise ValueError("Problem unsolvable using this method")
for winner in winning_indices:
assoc = association_set.associations_including_objects({objects[winner[0]],
objects[winner[1]]})
cleaned_set.add(assoc)
association_set.remove(assoc)
# Recursive step
runners_up = set()
for assoc1 in association_set.associations:
for assoc2 in association_set.associations:
if conflicts(assoc1, assoc2):
runners_up = multidimensional_deconfliction(association_set).associations
# At this point, none of association_set should conflict with one another
for runner_up in runners_up:
for winner in cleaned_set:
if conflicts(runner_up, winner):
runner_up.time_range.minus(winner.time_range)
if runner_up.time_range is not None:
cleaned_set.add(runner_up)
else:
runners_up.remove(runner_up)
return cleaned_set
|
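The assign2D call above solves a standard 2D assignment problem; outside of Stone Soup the same step can be reproduced with scipy's linear_sum_assignment (the toy overlap matrix below is illustrative, not taken from the code above):

import numpy as np
from scipy.optimize import linear_sum_assignment

# toy matrix: total association time in seconds between objects i and j
totals = np.array([[0, 30, 5],
                   [30, 0, 10],
                   [5, 10, 0]])
row_ind, col_ind = linear_sum_assignment(totals, maximize=True)
print([(int(r), int(c)) for r, c in zip(row_ind, col_ind)])  # [(0, 1), (1, 0), (2, 2)]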
45,043 |
def test_log_manager_batches_logs(logger, log_manager, monkeypatch):
monkeypatch.setattr(prefect.utilities.logging, "MAX_BATCH_LOG_SIZE", 1000)
monkeypatch.setattr(prefect.utilities.logging, "MAX_LOG_SIZE", 500)
# Fill up log queue with multiple logs exceeding the total batch length
for i in range(10):
logger.info(str(i) * 50)
time.sleep(0.5)
assert log_manager.queue.empty()
messages = [
l["message"]
for call in log_manager.client.write_run_logs.call_args_list
for l in call[0][0]
]
assert messages == [f"{i}" * 50 for i in range(10)]
assert log_manager.client.write_run_logs.call_count == 5
for upload in log_manager.client.write_run_logs.call_args_list:
log_entries = [log_entry for log_entry in upload[0][0]]
payload = json.dumps(log_entries)
assert len(payload) <= 1000, payload
|
def test_log_manager_batches_logs(logger, log_manager, monkeypatch):
monkeypatch.setattr(prefect.utilities.logging, "MAX_BATCH_LOG_SIZE", 1000)
monkeypatch.setattr(prefect.utilities.logging, "MAX_LOG_SIZE", 500)
# Fill up log queue with multiple logs exceeding the MAX_BATCH_LOG_SIZE
for i in range(10):
logger.info(str(i) * 50)
time.sleep(0.5)
assert log_manager.queue.empty()
messages = [
l["message"]
for call in log_manager.client.write_run_logs.call_args_list
for l in call[0][0]
]
assert messages == [f"{i}" * 50 for i in range(10)]
assert log_manager.client.write_run_logs.call_count == 5
for upload in log_manager.client.write_run_logs.call_args_list:
log_entries = [log_entry for log_entry in upload[0][0]]
payload = json.dumps(log_entries)
assert len(payload) <= 1000, payload
|
3,838 |
def test_all_simple_paths_with_two_targets_in_line():
G = nx.path_graph(4)
paths = nx.all_simple_paths(G, 0, [2, 3])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2), (0, 1, 2, 3)})
|
def test_all_simple_paths_with_two_targets_in_line_emits_two_paths():
G = nx.path_graph(4)
paths = nx.all_simple_paths(G, 0, [2, 3])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2), (0, 1, 2, 3)})
|
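A standalone version of the scenario that test covers, runnable with a plain assert instead of the nose-style assert_equal:

import networkx as nx

G = nx.path_graph(4)                        # 0 - 1 - 2 - 3
paths = nx.all_simple_paths(G, 0, [2, 3])   # multiple targets are allowed
assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 2, 3)}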
22,240 |
def persist_uploads(params, trans):
"""
Turn any uploads in the submitted form to persisted files.
"""
if 'files' in params:
new_files = []
for upload_dataset in params['files']:
f = upload_dataset['file_data']
if isinstance(f, cgi_FieldStorage):
assert not isinstance(f.file, StringIO)
assert f.file.name != '<fdopen>'
local_filename = util.mkstemp_ln(f.file.name, 'upload_file_data_')
f.file.close()
upload_dataset['file_data'] = dict(filename=f.filename,
local_filename=local_filename)
elif type(f) == dict and 'local_filename' not in f:
raise Exception('Uploaded file was encoded in a way not understood by Galaxy.')
if 'url_paste' in upload_dataset and upload_dataset['url_paste']:
upload_dataset['url_paste'] = upload_dataset['url_paste'].strip()
if upload_dataset['url_paste']:
upload_dataset['url_paste'] = datatypes.sniff.stream_to_file(
StringIO(validate_url(upload_dataset['url_paste'], trans.app.config.fetch_url_whitelist_ips)),
prefix="strio_url_paste_"
)
else:
upload_dataset['url_paste'] = None
new_files.append(upload_dataset)
params['files'] = new_files
return params
|
def persist_uploads(params, trans):
"""
Turn any uploads in the submitted form to persisted files.
"""
if 'files' in params:
new_files = []
for upload_dataset in params['files']:
f = upload_dataset['file_data']
if isinstance(f, cgi_FieldStorage):
assert not isinstance(f.file, StringIO)
assert f.file.name != '<fdopen>'
local_filename = util.mkstemp_ln(f.file.name, 'upload_file_data_')
f.file.close()
upload_dataset['file_data'] = dict(filename=f.filename,
local_filename=local_filename)
elif type(f) == dict and 'local_filename' not in f:
raise Exception('Uploaded file was encoded in a way not understood by Galaxy.')
if 'url_paste' in upload_dataset:
upload_dataset['url_paste'] = upload_dataset['url_paste'].strip()
if upload_dataset['url_paste']:
upload_dataset['url_paste'] = datatypes.sniff.stream_to_file(
StringIO(validate_url(upload_dataset['url_paste'], trans.app.config.fetch_url_whitelist_ips)),
prefix="strio_url_paste_"
)
else:
upload_dataset['url_paste'] = None
new_files.append(upload_dataset)
params['files'] = new_files
return params
|
32,177 |
def execute_command_seacrh_and_compliance_not_deleted_yet(command, args):
if command == 'o365-sc-get-search' and args:
return [{'Status': 'Completed'}]
elif command == 'o365-sc-list-search-action':
return []
elif command == 'o365-sc-new-search-action':
return None
elif command == 'o365-sc-get-search-action':
return {'Status': 'Starting'}
|
def execute_command_search_and_compliance_not_deleted_yet(command, args):
if command == 'o365-sc-get-search' and args:
return [{'Status': 'Completed'}]
elif command == 'o365-sc-list-search-action':
return []
elif command == 'o365-sc-new-search-action':
return None
elif command == 'o365-sc-get-search-action':
return {'Status': 'Starting'}
|
40,543 |
def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch oid of 'custom-locations' app. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Proceeding without enabling the feature. " + str(e)
logger.warning(log_string)
return ""
|
def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch oid of 'custom-locations' app. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the Object ID provided to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Proceeding without enabling the feature. " + str(e)
logger.warning(log_string)
return ""
|
42,044 |
def test_plot_slice() -> None:
# Test with no trial.
study = prepare_study_with_trials(no_trials=True)
figure = plot_slice(study)
assert len(figure.findobj(PathCollection)) == 0
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_slice(study)
assert len(figure) == 2
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Objective Value"
# Scatter plot data is available as PathCollection
data0 = figure[0].findobj(PathCollection)[0].get_offsets().data
data1 = figure[1].findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
assert np.allclose(data1, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
# Test with a trial to select parameter.
figure = plot_slice(study, params=["param_a"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target name.
figure = plot_slice(study, target_name="Target Name")
assert len(figure) == 2
assert len(figure[0].get_lines()) == 0
assert len(figure[1].get_lines()) == 0
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_slice(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_slice(study)
assert len(figure.get_lines()) == 0
assert len(figure.findobj(PathCollection)) == 0
|
def test_plot_slice() -> None:
# Test with no trial.
study = prepare_study_with_trials(no_trials=True)
figure = plot_slice(study)
assert len(figure.findobj(PathCollection)) == 0
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_slice(study)
assert len(figure) == 2
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Objective Value"
# Scatter plot data is available as PathCollection
data0 = figure[0].findobj(PathCollection)[0].get_offsets().data
data1 = figure[1].findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
assert np.allclose(data1, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
# Test with a trial to select parameter.
figure = plot_slice(study, params=["param_a"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
assert len(figure.findobj(PathCollection)) == 1
assert figure.yaxis.label.get_text() == "Objective Value"
data0 = figure.findobj(PathCollection)[0].get_offsets().data
assert np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
# Test with a customized target name.
figure = plot_slice(study, target_name="Target Name")
assert len(figure) == 2
assert len(figure[0].get_lines()) == 0
assert len(figure[1].get_lines()) == 0
assert len(figure[0].findobj(PathCollection)) == 1
assert len(figure[1].findobj(PathCollection)) == 1
assert figure[0].yaxis.label.get_text() == "Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_slice(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_slice(study)
assert len(figure.get_lines()) == 0
assert len(figure.findobj(PathCollection)) == 0
|
3,419 |
def _transform_single_condition(
condition: Union[Condition, BooleanCondition]
) -> Tuple[Optional[Union[Condition, BooleanCondition]], StatusFilter]:
if isinstance(condition, Condition):
if condition.lhs == Function("ifNull", parameters=[Column("session.status"), ""]):
# HACK: metrics tags are never null. We should really
# write our own parser for this.
condition = replace(condition, lhs=Column("session.status"))
if condition.lhs == Column("session.status"):
if condition.op == Op.EQ:
return None, _parse_session_status(condition.rhs)
if condition.op == Op.NEQ:
return None, ALL_STATUSES - _parse_session_status(condition.rhs)
if condition.op == Op.IN:
return None, frozenset.union(
*[_parse_session_status(status) for status in condition.rhs]
)
if condition.op == Op.NOT_IN:
return None, ALL_STATUSES - frozenset.union(
*[_parse_session_status(status) for status in condition.rhs]
)
raise InvalidParams("Unable to resolve session.status filter")
if "session.status" in str(condition):
# Anything not handled by the code above cannot be parsed for now,
# for two reasons:
# 1) Queries like session.status:healthy OR release:foo are hard to
# translate, because they would require different conditions on the separate
# metric fields.
# 2) AND and OR conditions come in the form `Condition(Function("or", [...]), Op.EQ, 1)`
# where [...] can again contain any condition encoded as a Function. For this, we would
# have to replicate the translation code above.
raise InvalidParams("Unable to parse condition with session.status")
return condition, None
|
def _transform_single_condition(
condition: Union[Condition, BooleanCondition]
) -> Tuple[Optional[Union[Condition, BooleanCondition]], StatusFilter]:
if isinstance(condition, Condition):
if condition.lhs == Function("ifNull", parameters=[Column("session.status"), ""]):
# HACK: metrics tags are never null. We should really
# write our own parser for this.
condition = replace(condition, lhs=Column("session.status"))
elif condition.lhs == Column("session.status"):
if condition.op == Op.EQ:
return None, _parse_session_status(condition.rhs)
if condition.op == Op.NEQ:
return None, ALL_STATUSES - _parse_session_status(condition.rhs)
if condition.op == Op.IN:
return None, frozenset.union(
*[_parse_session_status(status) for status in condition.rhs]
)
if condition.op == Op.NOT_IN:
return None, ALL_STATUSES - frozenset.union(
*[_parse_session_status(status) for status in condition.rhs]
)
raise InvalidParams("Unable to resolve session.status filter")
if "session.status" in str(condition):
# Anything not handled by the code above cannot be parsed for now,
# for two reasons:
# 1) Queries like session.status:healthy OR release:foo are hard to
# translate, because they would require different conditions on the separate
# metric fields.
# 2) AND and OR conditions come in the form `Condition(Function("or", [...]), Op.EQ, 1)`
# where [...] can again contain any condition encoded as a Function. For this, we would
# have to replicate the translation code above.
raise InvalidParams("Unable to parse condition with session.status")
return condition, None
|
39,113 |
def kv_of(event: Dict[str, Any], name_field: str,
value_field: str) -> Tuple[Any, Any]:
"""Enriches main event with values from side input data
Args:
event: Primary event data fetched from pubsub
name_field: Lookup key field name
value_field: Lookup value field name
Returns:
Look up information representing product name and lookup value
"""
return (event[name_field], event[value_field])
|
def kv_of(event: Dict[str, Any], name_field: str,
value_field: str) -> Tuple[Any, Any]:
"""Enriches main event with values from side input data
Args:
event: Primary event data fetched from pubsub
name_field: Lookup key field name
value_field: Lookup value field name
Returns:
Look up information representing product name and lookup value
"""
return (event[name_field], event.get(value_field))
|
13,400 |
def test__m_series_nvme_enclosures(fs):
fake_nvme_enclosure = Mock()
middleware = Mock(
call_sync=Mock(
side_effect=lambda method, *args: {
"system.dmidecode_info": lambda: {"system-product-name": "TRUENAS-M60-HA"},
"enclosure.fake_nvme_enclosure": fake_nvme_enclosure,
}[method](*args)
)
)
fs.create_file("/sys/bus/pci/slots/0-1/address", contents="0000:60:00\n")
with patch("middlewared.plugins.enclosure_.nvme.pyudev") as pyudev:
pyudev.Context = Mock(
return_value=Mock(
list_devices=Mock(
return_value=[
Mock(
attributes={"path": b"\\_SB_.PC03.BR3A"},
sys_path="/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:03/device:c5",
),
],
)
)
)
child = Mock(sys_name="nvme1")
child.parent = Mock(sys_name="0000:60:00.0")
pyudev.Devices = Mock(
from_path=Mock(
side_effect=lambda context, path: {
"/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:03/device:c5/physical_node": Mock(
children=[child]
)
}[path],
)
)
EnclosureService(middleware).map_nvme()\
fake_nvme_enclosure.assert_called_once_with(
"m60_plx_enclosure",
"Rear NVME U.2 Hotswap Bays",
"M60 Series",
4,
{1: "nvme1"},
)
|
def test__m_series_nvme_enclosures(fs):
fake_nvme_enclosure = Mock()
middleware = Mock(
call_sync=Mock(
side_effect=lambda method, *args: {
"system.dmidecode_info": lambda: {"system-product-name": "TRUENAS-M60-HA"},
"enclosure.fake_nvme_enclosure": fake_nvme_enclosure,
}[method](*args)
)
)
fs.create_file("/sys/bus/pci/slots/0-1/address", contents="0000:60:00\n")
with patch("middlewared.plugins.enclosure_.nvme.pyudev") as pyudev:
pyudev.Context = Mock(
return_value=Mock(
list_devices=Mock(
return_value=[
Mock(
attributes={"path": b"\\_SB_.PC03.BR3A"},
sys_path="/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:03/device:c5",
),
],
)
)
)
child = Mock(sys_name="nvme1")
child.parent = Mock(sys_name="0000:60:00.0")
pyudev.Devices = Mock(
from_path=Mock(
side_effect=lambda context, path: {
"/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:03/device:c5/physical_node": Mock(
children=[child]
)
}[path],
)
)
EnclosureService(middleware).map_nvme()
fake_nvme_enclosure.assert_called_once_with(
"m60_plx_enclosure",
"Rear NVME U.2 Hotswap Bays",
"M60 Series",
4,
{1: "nvme1"},
)
|
26,012 |
def elastic_pool_update(
cmd,
instance,
max_capacity=None,
min_capacity=None,
max_size_bytes=None,
zone_redundant=None,
tier=None,
family=None,
capacity=None,
maintenance_configuration_id=None,
high_availability_replica_count=None):
'''
Updates an elastic pool. Custom update function to apply parameters to instance.
'''
#####
# Set sku-related properties
#####
# Update sku
_db_elastic_pool_update_sku(
cmd,
instance,
None, # service_objective
tier,
family,
capacity,
find_sku_from_capabilities_func=_find_elastic_pool_sku_from_capabilities)
#####
# Set other properties
#####
if max_capacity:
instance.per_database_settings.max_capacity = max_capacity
if min_capacity:
instance.per_database_settings.min_capacity = min_capacity
if max_size_bytes:
instance.max_size_bytes = max_size_bytes
if zone_redundant is not None:
instance.zone_redundant = zone_redundant
instance.maintenance_configuration_id = _complete_maintenance_configuration_id(
cmd.cli_ctx,
maintenance_configuration_id)
if high_availability_replica_count:
instance.high_availability_replica_count = high_availability_replica_count
return instance
|
def elastic_pool_update(
cmd,
instance,
max_capacity=None,
min_capacity=None,
max_size_bytes=None,
zone_redundant=None,
tier=None,
family=None,
capacity=None,
maintenance_configuration_id=None,
high_availability_replica_count=None):
'''
Updates an elastic pool. Custom update function to apply parameters to instance.
'''
#####
# Set sku-related properties
#####
# Update sku
_db_elastic_pool_update_sku(
cmd,
instance,
None, # service_objective
tier,
family,
capacity,
find_sku_from_capabilities_func=_find_elastic_pool_sku_from_capabilities)
#####
# Set other properties
#####
if max_capacity:
instance.per_database_settings.max_capacity = max_capacity
if min_capacity:
instance.per_database_settings.min_capacity = min_capacity
if max_size_bytes:
instance.max_size_bytes = max_size_bytes
if zone_redundant is not None:
instance.zone_redundant = zone_redundant
instance.maintenance_configuration_id = _complete_maintenance_configuration_id(
cmd.cli_ctx,
maintenance_configuration_id)
if high_availability_replica_count is not None:
instance.high_availability_replica_count = high_availability_replica_count
return instance
|
32,085 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set = open_id_set_file(option.id_set_path)
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
is_create_dependencies_zip = option.create_dependencies_zip
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path, id_set, marketplace)
# packs that depend on new packs that are not in the previous index.zip
packs_with_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
if not prepare_and_zip_pack(pack, signature_key, marketplace, remove_test_playbooks):
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names, id_set, marketplace)
if is_missing_dependencies:
# If the pack is dependent on a new pack, therefore it is not yet in the index.zip as it might not have
# been iterated yet, we will note that it is missing dependencies, and after updating the index.zip with
# all new packs - we will go over the pack again to add what was missing.
# See issue #37290
packs_with_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case the pack already exists at the cloud storage path and in the index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_with_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_with_missing_dependencies: {packs_with_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_with_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, id_set, marketplace, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
# marketplace v2 isn't currently supported - dependencies zip should only be used for v1
if is_create_dependencies_zip and marketplace == 'xsoar':
# handle packs with dependencies zip
upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key,
storage_bucket, storage_base_path, id_set, packs_list, marketplace)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set = open_id_set_file(option.id_set_path)
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
is_create_dependencies_zip = option.create_dependencies_zip
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path, id_set, marketplace)
# packs that depend on new packs that are not in the previous index.zip
packs_with_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
if not prepare_and_zip_pack(pack, signature_key, marketplace, remove_test_playbooks):
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names, id_set, marketplace)
if is_missing_dependencies:
# If the pack is dependent on a new pack, therefore it is not yet in the index.zip as it might not have
# been iterated yet, we will note that it is missing dependencies, and after updating the index.zip with
# all new packs - we will go over the packs again to add what was missing.
# See issue #37290
packs_with_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case the pack already exists at the cloud storage path and in the index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_with_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_with_missing_dependencies: {packs_with_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_with_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, id_set, marketplace, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
# marketplace v2 isn't currently supported - dependencies zip should only be used for v1
if is_create_dependencies_zip and marketplace == 'xsoar':
# handle packs with dependencies zip
upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key,
storage_bucket, storage_base_path, id_set, packs_list, marketplace)
|
890 |
def _af_rmuln(*permutations):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmuln, Permutation
>>> a, b, c = [1, 0, 2], [0, 2, 1], [2, 0, 1]
>>> _af_rmuln(a, b, c)
[0, 1, 2]
>>> [a[b[c[i]]] for i in range(3)]
[0, 1, 2]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmul
"""
if len(permutations)==0:
raise ValueError("List must not be empty")
else:
result, perms = permutations[0], permutations[1:]
for p in perms:
result = [result[i] for i in p]
return result
|
def _af_rmuln(*permutations):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmuln, Permutation
>>> a, b, c = [1, 0, 2], [0, 2, 1], [2, 0, 1]
>>> _af_rmuln(a, b, c)
[0, 1, 2]
>>> [a[b[c[i]]] for i in range(3)]
[0, 1, 2]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmul
"""
if len(permutations) == 0:
raise ValueError("List must not be empty")
else:
result, perms = permutations[0], permutations[1:]
for p in perms:
result = [result[i] for i in p]
return result
|
35,984 |
def print_header(file_format, outfile, debug, **kwargs):
"""Print header for export"""
from tabulate import tabulate
from aiida.cmdline.utils import echo
from aiida.tools.importexport.common.config import EXPORT_VERSION
title = 'EXPORT - !!! DEBUG MODE !!!' if debug else 'EXPORT'
parameters = [['Archive', outfile], ['Format', file_format], ['Export version', EXPORT_VERSION]]
result = '\n{}'.format(tabulate(parameters, headers=[title, '']))
include_comments = kwargs.get('include_comments', True)
include_logs = kwargs.get('include_logs', True)
input_forward = kwargs.get('input_forward', False)
create_reversed = kwargs.get('create_reversed', True)
return_reversed = kwargs.get('return_reversed', False)
call_reversed = kwargs.get('call_reversed', False)
inclusions = [['Include Comments', include_comments], ['Include Logs', include_logs]]
result += '\n\n{}'.format(tabulate(inclusions, headers=['Included', '']))
traversal_rules = [['Follow INPUT Links forwards',
input_forward], ['Follow CREATE Links backwards', create_reversed],
['Follow RETURN Links backwards', return_reversed],
['Follow CALL Links backwards', call_reversed]]
result += '\n\n{}\n'.format(tabulate(traversal_rules, headers=['Traversal rules', '']))
echo.echo(result)
|
def print_header(file_format, outfile, debug, **kwargs):
"""Print header for export"""
from tabulate import tabulate
from aiida.cmdline.utils import echo
from aiida.tools.importexport.common.config import EXPORT_VERSION
title = 'EXPORT - !!! DEBUG MODE !!!' if debug else 'EXPORT'
parameters = [['Archive', outfile], ['Format', file_format], ['Export version', EXPORT_VERSION]]
result = '\n{}'.format(tabulate(parameters, headers=[title, '']))
include_comments = kwargs.get('include_comments', True)
include_logs = kwargs.get('include_logs', True)
input_forward = kwargs.get('input_forward', False)
create_reversed = kwargs.get('create_reversed', True)
return_reversed = kwargs.get('return_reversed', False)
call_reversed = kwargs.get('call_reversed', False)
inclusions = [['Include Comments', include_comments], ['Include Logs', include_logs]]
result += '\n\n{}'.format(tabulate(inclusions, headers=['Inclusion rules', '']))
traversal_rules = [['Follow INPUT Links forwards',
input_forward], ['Follow CREATE Links backwards', create_reversed],
['Follow RETURN Links backwards', return_reversed],
['Follow CALL Links backwards', call_reversed]]
result += '\n\n{}\n'.format(tabulate(traversal_rules, headers=['Traversal rules', '']))
echo.echo(result)
|
40,278 |
def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogeneous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert torch.allclose(data.edge_index,
hetero_data.edge_stores[0]['edge_index'])
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
|
def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogeneous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert torch.allclose(data.edge_index,
hetero_data['0', '0'].edge_index)
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
|
24,870 |
def check_config_6(machine, old_conf, new_conf):
"""
Example code must not trigger the message,
Given an if-elif construct
When the body of the if ends with an if expression
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
print("Processed old configuration...")
elif new_conf:
machine.enable(new_conf.value)
|
def not_triggered_if_indented_block_ends_with_ifexp(machine, old_conf, new_conf):
"""
Example code must not trigger the message,
Given an if-elif construct
When the body of the if ends with an if expression
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
print("Processed old configuration...")
elif new_conf:
machine.enable(new_conf.value)
|
17,779 |
def fromString(instrumentString: str,
language: str = 'all'):
'''
Given a string with instrument content (from an orchestral score
for example), attempts to return an appropriate
:class:`~music21.instrument.Instrument`.
>>> from music21 import instrument
>>> t1 = instrument.fromString('Clarinet 2 in A')
>>> t1
<music21.instrument.Clarinet 'Clarinet 2 in A'>
>>> t1.transposition
<music21.interval.Interval m-3>
>>> t2 = instrument.fromString('Clarinetto 3')
>>> t2
<music21.instrument.Clarinet 'Clarinetto 3'>
>>> t3 = instrument.fromString('flauto 2')
>>> t3
<music21.instrument.Flute 'flauto 2'>
Excess information is ignored, and the useful information can be extracted
correctly as long as it's sequential.
>>> t4 = instrument.fromString('I <3 music saxofono tenore go beavers')
>>> t4
<music21.instrument.TenorSaxophone 'I <3 music saxofono tenore go beavers'>
Some more demos:
>>> t5 = instrument.fromString('Bb Clarinet')
>>> t5
<music21.instrument.Clarinet 'Bb Clarinet'>
>>> t5.transposition
<music21.interval.Interval M-2>
>>> t6 = instrument.fromString('Clarinet in B-flat')
>>> t5.__class__ == t6.__class__
True
>>> t5.transposition == t6.transposition
True
>>> t7 = instrument.fromString('B-flat Clarinet.')
>>> t5.__class__ == t7.__class__ and t5.transposition == t7.transposition
True
>>> t8 = instrument.fromString('Eb Clarinet')
>>> t5.__class__ == t8.__class__
True
>>> t8.transposition
<music21.interval.Interval m3>
Note that because of the ubiquity of B-flat clarinets and trumpets, and the
rareness of B-natural forms of those instruments, this gives a B-flat, not
B-natural clarinet, using the German form:
>>> t9 = instrument.fromString('Klarinette in B.')
>>> t9
<music21.instrument.Clarinet 'Klarinette in B.'>
>>> t9.transposition
<music21.interval.Interval M-2>
Use "H" or "b-natural" to get an instrument in B-major. Or donate one to me
and I'll change this back!
Standard abbreviations are acceptable:
>>> t10 = instrument.fromString('Cl in B-flat')
>>> t10
<music21.instrument.Clarinet 'Cl in B-flat'>
>>> t10.transposition
<music21.interval.Interval M-2>
This should work with or without a terminal period (for both 'Cl' and 'Cl.'):
>>> t11 = instrument.fromString('Cl. in B-flat')
>>> t11.__class__ == t10.__class__
True
Previously an exact instrument name was not always working:
>>> instrument.fromString('Flute')
<music21.instrument.Flute 'Flute'>
This common MIDI instrument was not previously working:
>>> instrument.fromString('Choir (Aahs)')
<music21.instrument.Choir 'Choir (Aahs)'>
By default, this function searches over all stored instrument names.
This includes multiple languages as well as the abbreviations
(an honorary 'language' for these purposes).
Alternatively, you can specify the language to search using the 'language' argument.
>>> t12 = instrument.fromString('Klarinette', language='german')
>>> t12
<music21.instrument.Clarinet 'Klarinette'>
This case works because the name 'Klarinette' is a recognised instrument name in German
and appears in the German language list.
If you search for a German name like 'Klarinette' on the French list (language='french'),
then it won't be found and an error will be raised.
An error is also raised if the specified langauge is not one of those currently supported:
'English', 'French', 'German', 'Italian', 'Russian', 'Spanish', and 'Abbreviation'.
Note that the language string is not case-sensitive, so 'french' and 'French' are equivalent.
'''
# pylint: disable=undefined-variable
from music21.languageExcerpts import instrumentLookup
if language == 'all':
sourceDict = instrumentLookup.allToClassName
else:
lang = language.lower()
if lang not in _currentlySupportedLanguages:
raise InstrumentException(f'Chosen language {language} not currently supported.')
sourceDict = getattr(instrumentLookup, lang + 'ToClassName')
instrumentStringOrig = instrumentString
instrumentString = instrumentString.replace('.', ' ') # sic, before removePunctuation
instrumentString = instrumentString.lower() # previously run on each substring separately
instrumentString = common.removePunctuation(instrumentString)
allCombinations = _combinations(instrumentString)
# First task: Find the best instrument.
bestInstClass = None
bestInstrument = None
bestName = None
this_module = importlib.import_module('music21.instrument')
for substring in allCombinations:
try:
className = sourceDict[substring]
thisInstClass = getattr(this_module, className)
# In case users have overridden the module and imported more things
if base.Music21Object not in thisInstClass.__mro__: # pragma: no cover
raise KeyError
thisInstrument = thisInstClass()
thisBestName = thisInstrument.bestName().lower()
if (bestInstClass is None
or len(thisBestName.split()) >= len(bestName.split())
and not issubclass(bestInstClass, thisInstClass)):
# priority is also given to same length instruments which fall later
# on in the string (i.e. Bb Piccolo Trumpet)
bestInstClass = thisInstClass
bestInstrument = thisInstrument
bestInstrument.instrumentName = instrumentStringOrig
bestName = thisBestName
except KeyError:
pass
if bestInstClass is None:
raise InstrumentException(
f'Could not match string with instrument: {instrumentStringOrig}')
if bestName not in instrumentLookup.transposition:
return bestInstrument
# A transposition table is defined for the instrument.
# Second task: Determine appropriate transposition (if any)
for substring in allCombinations:
try:
bestPitch = instrumentLookup.pitchFullNameToName[substring.lower()]
bestInterval = instrumentLookup.transposition[bestName][bestPitch]
if bestInstrument and bestInterval:
bestInstrument.transposition = interval.Interval(bestInterval)
break
except KeyError:
pass
return bestInstrument
|
def fromString(instrumentString: str,
language: str = 'all'):
'''
Given a string with instrument content (from an orchestral score
for example), attempts to return an appropriate
:class:`~music21.instrument.Instrument`.
>>> from music21 import instrument
>>> t1 = instrument.fromString('Clarinet 2 in A')
>>> t1
<music21.instrument.Clarinet 'Clarinet 2 in A'>
>>> t1.transposition
<music21.interval.Interval m-3>
>>> t2 = instrument.fromString('Clarinetto 3')
>>> t2
<music21.instrument.Clarinet 'Clarinetto 3'>
>>> t3 = instrument.fromString('flauto 2')
>>> t3
<music21.instrument.Flute 'flauto 2'>
Excess information is ignored, and the useful information can be extracted
correctly as long as it's sequential.
>>> t4 = instrument.fromString('I <3 music saxofono tenore go beavers')
>>> t4
<music21.instrument.TenorSaxophone 'I <3 music saxofono tenore go beavers'>
Some more demos:
>>> t5 = instrument.fromString('Bb Clarinet')
>>> t5
<music21.instrument.Clarinet 'Bb Clarinet'>
>>> t5.transposition
<music21.interval.Interval M-2>
>>> t6 = instrument.fromString('Clarinet in B-flat')
>>> t5.__class__ == t6.__class__
True
>>> t5.transposition == t6.transposition
True
>>> t7 = instrument.fromString('B-flat Clarinet.')
>>> t5.__class__ == t7.__class__ and t5.transposition == t7.transposition
True
>>> t8 = instrument.fromString('Eb Clarinet')
>>> t5.__class__ == t8.__class__
True
>>> t8.transposition
<music21.interval.Interval m3>
Note that because of the ubiquity of B-flat clarinets and trumpets, and the
rareness of B-natural forms of those instruments, this gives a B-flat, not
B-natural clarinet, using the German form:
>>> t9 = instrument.fromString('Klarinette in B.')
>>> t9
<music21.instrument.Clarinet 'Klarinette in B.'>
>>> t9.transposition
<music21.interval.Interval M-2>
Use "H" or "b-natural" to get an instrument in B-major. Or donate one to me
and I'll change this back!
Standard abbreviations are acceptable:
>>> t10 = instrument.fromString('Cl in B-flat')
>>> t10
<music21.instrument.Clarinet 'Cl in B-flat'>
>>> t10.transposition
<music21.interval.Interval M-2>
This should work with or without a terminal period (for both 'Cl' and 'Cl.'):
>>> t11 = instrument.fromString('Cl. in B-flat')
>>> t11.__class__ == t10.__class__
True
Previously an exact instrument name was not always working:
>>> instrument.fromString('Flute')
<music21.instrument.Flute 'Flute'>
This common MIDI instrument was not previously working:
>>> instrument.fromString('Choir (Aahs)')
<music21.instrument.Choir 'Choir (Aahs)'>
By default, this function searches over all stored instrument names.
This includes multiple languages as well as the abbreviations
(an honorary 'language' for these purposes).
Alternatively, you can specify the language to search using the 'language' argument.
>>> t12 = instrument.fromString('Klarinette', language='german')
>>> t12
<music21.instrument.Clarinet 'Klarinette'>
This case works because the name 'Klarinette' is a recognised instrument name in German
and appears in the German language list.
If you search for a German name like 'Klarinette' on the French list (language='french'),
then it won't be found and an error will be raised.
An error is also raised if the specified language is not one of those currently supported:
'English', 'French', 'German', 'Italian', 'Russian', 'Spanish', and 'Abbreviation'.
Note that the language string is not case-sensitive, so 'french' and 'French' are equivalent.
'''
# pylint: disable=undefined-variable
from music21.languageExcerpts import instrumentLookup
if language == 'all':
sourceDict = instrumentLookup.allToClassName
else:
lang = language.lower()
if lang not in _currentlySupportedLanguages:
raise InstrumentException(f'Chosen language {language} not currently supported.')
sourceDict = getattr(instrumentLookup, lang + 'ToClassName')
instrumentStringOrig = instrumentString
instrumentString = instrumentString.replace('.', ' ') # sic, before removePunctuation
instrumentString = instrumentString.lower() # previously run on each substring separately
instrumentString = common.removePunctuation(instrumentString)
allCombinations = _combinations(instrumentString)
# First task: Find the best instrument.
bestInstClass = None
bestInstrument = None
bestName = None
this_module = importlib.import_module('music21.instrument')
for substring in allCombinations:
try:
className = sourceDict[substring]
thisInstClass = getattr(this_module, className)
# In case users have overridden the module and imported more things
if base.Music21Object not in thisInstClass.__mro__: # pragma: no cover
raise KeyError
thisInstrument = thisInstClass()
thisBestName = thisInstrument.bestName().lower()
if (bestInstClass is None
or len(thisBestName.split()) >= len(bestName.split())
and not issubclass(bestInstClass, thisInstClass)):
# priority is also given to same length instruments which fall later
# on in the string (i.e. Bb Piccolo Trumpet)
bestInstClass = thisInstClass
bestInstrument = thisInstrument
bestInstrument.instrumentName = instrumentStringOrig
bestName = thisBestName
except KeyError:
pass
if bestInstClass is None:
raise InstrumentException(
f'Could not match string with instrument: {instrumentStringOrig}')
if bestName not in instrumentLookup.transposition:
return bestInstrument
# A transposition table is defined for the instrument.
# Second task: Determine appropriate transposition (if any)
for substring in allCombinations:
try:
bestPitch = instrumentLookup.pitchFullNameToName[substring.lower()]
bestInterval = instrumentLookup.transposition[bestName][bestPitch]
if bestInstrument and bestInterval:
bestInstrument.transposition = interval.Interval(bestInterval)
break
except KeyError:
pass
return bestInstrument
|
15,403 |
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will no longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
device_sub_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will no longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|